| prompt | completion | api |
| --- | --- | --- |
| string, length 19 to 1.03M | string, length 4 to 2.12k | string, length 8 to 90 |
import pytest
import numpy as np
import pandas as pd
from dask.array import from_array, Array
from dask.delayed import Delayed
from dask.dataframe import from_pandas, Series, to_numeric
@pytest.mark.parametrize("arg", ["5", 5, "5 "])
def test_to_numeric_on_scalars(arg):
output = to_numeric(arg)
assert isinstance(output, Delayed)
assert output.compute() == 5
def test_to_numeric_on_dask_array():
arg = from_array(["1.0", "2", "-3", "5.1"])
expected = np.array([1.0, 2.0, -3.0, 5.1])
output = to_numeric(arg)
assert isinstance(output, Array)
assert list(output.compute()) == list(expected)
def test_to_numeric_on_dask_dataframe_series():
s = pd.Series(["1.0", "2", -3, -5.1])
arg = from_pandas(s, npartitions=2)
expected = pd.to_numeric(s)
output = to_numeric(arg)
assert output.dtype == "int64"
assert isinstance(output, Series)
assert list(output.compute()) == list(expected)
def test_to_numeric_on_dask_dataframe_series_with_meta():
s = pd.Series(["1.0", "2", -3, -5.1])
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import itertools
from sklearn.metrics import accuracy_score
from scipy.optimize import curve_fit
from sklearn.metrics import r2_score
from matplotlib.patches import Rectangle
def objective(x, a, b, c):
return a * np.exp(-b * x) + c
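# Illustrative sketch, assuming `objective` above is meant to be fitted with
# curve_fit (imported above but otherwise unused in this excerpt). The demo
# names and synthetic data are hypothetical.
x_demo = np.linspace(0, 5, 50)
y_demo = objective(x_demo, 2.0, 1.3, 0.5) + 0.05 * np.random.randn(50)
popt_demo, _ = curve_fit(objective, x_demo, y_demo, p0=[1.0, 1.0, 0.0])
print('fitted a, b, c:', popt_demo)
print('R^2:', r2_score(y_demo, objective(x_demo, *popt_demo)))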
def get_dprime_vs_acc_csv(dataset_pkl):
# open tabular dataset and extract features and labels, remove unwanted columns like the image reference
dataset = pd.read_pickle(dataset_pkl)
print(dataset.columns)
features = [ele for ele in dataset.columns if ele not in {'label', 'image', 'category'}]
labels = dataset.label.unique()
data = dataset.drop(['image'], axis=1)
# construct dprime matrix
combinations = []
for feature in features:
class_dictionary = {}
for i, label_i in enumerate(labels[:-1]):
for label_j in labels[i + 1:]:
ui = data[data['label'] == label_i][feature].mean()
uj = data[data['label'] == label_j][feature].mean()
sigmai = data[data['label'] == label_i][feature].std()
sigmaj = data[data['label'] == label_j][feature].std()
class_dictionary[label_i+'_vs_'+label_j] = np.abs((np.max([ui, uj]) - np.min([ui, uj])) / np.sqrt((sigmai ** 2 + sigmaj ** 2) / 2))
combinations.append(class_dictionary)
df = pd.DataFrame(combinations,index = features)
# compute accuracy matrix
df_acc = df.copy()
for feature in df.index:
for labels in df.columns:
# get the class labels we want to compare
labeli,labelj = labels.split('_vs_')
# get data corresponding to the class labels and the feature used for classification
data = dataset[['label',feature]][dataset[['label',feature]].label.isin([labeli,labelj])]
# compute means of feature for each class
dd = data.groupby('label',as_index=False).mean()
mu_labeli = dd.loc[dd['label'] == labeli, feature].values[0]
mu_labelj = dd.loc[dd['label'] == labelj, feature].values[0]
#print(mu_labeli,mu_labelj)
# compute unbiased threshold
threshold = np.mean([mu_labeli, mu_labelj])
# assign predicted labels based on means
if mu_labeli < mu_labelj:
lower_label = labeli
upper_label = labelj
else:
lower_label = labelj
upper_label = labeli
y_predictions = []
y_actuals = []
for index, row in data.iterrows():
y_actuals.append(row["label"])
# make the classification based on whether the feature value is above or below the threshold
if row[feature] < threshold:
y_predictions.append(lower_label)
else:
y_predictions.append(upper_label)
acc = accuracy_score(y_actuals, y_predictions)
#print(labeli,'vs',labelj,':',feature, 'accuracy:',acc)
df_acc.loc[feature, labels] = acc
F = []
T=[]
A=[]
D =[]
for f in features:
for label in df.columns:
A.append(df_acc[label][f])
D.append(df[label][f])
F.append(f)
T.append(label)
df_results = pd.DataFrame(
{
"Tasks": T,
"Parameters": F,
"Dprime": D,
"Accuracy": A,
},
)
print(df_results)
return df_results.to_csv().encode('utf-8')
def calculate_dprime_and_acc_matrix(dataset_pkl):
# open tabular dataset and extract features and labels, remove unwanted columns like the image reference
dataset = pd.read_pickle(dataset_pkl)
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import cartopy.io.shapereader as shpreader
from matplotlib import cm
from matplotlib import colors
import monet as m
import numpy as np
import pandas as pd
import wrf as wrfpy
import xarray as xr
def get_proj(ds):
"""
Extracts information about the CMAQ grid projection from the proj4_srs attribute
in an output file dataset.
Parameters
----------
:param ds: `xarray.Dataset`
CMAQ dataset containing the proj4_srs attribute with grid projection information.
:return cartopy_crs: cartopy coordinate reference system
"""
proj_params = ds.proj4_srs
proj_params = proj_params.replace(' ', '')
proj_params = proj_params.split('+')
proj = proj_params[1].split('=')[1]
truelat1 = float(proj_params[2].split('=')[1])
truelat2 = float(proj_params[3].split('=')[1])
central_latitude = float(proj_params[4].split('=')[1])
central_longitude = float(proj_params[5].split('=')[1])
if proj == 'lcc':
cartopy_crs = ccrs.LambertConformal(central_longitude=central_longitude,
central_latitude=central_latitude,
standard_parallels=[truelat1, truelat2])
return cartopy_crs
else:
raise ValueError('Your projection is not the expected Lambert Conformal.')
def get_domain_boundary(ds, cartopy_crs):
"""
Finds the boundary of the CMAQ or WRF domain.
Parameters
----------
:param ds: `xarray.Dataset`
Dataset from WRF or CMAQ containing a latitude and longitude coordinate.
Note that currently this function only works if the latitude coordinate
is either named "latitude" or "XLAT" and the longitude coordinate is
either named "longitude" or "XLONG."
:param cartopy_crs: `cartopy.crs.CRS`
Cartopy coordinate reference system.
:return projected_bounds: list
Bounds of the domain transformed into the specified coordinate reference
system.
"""
# Rename the lat-lon coordinates to get wrf-python to recognize them
variables = {'latitude': 'XLAT',
'longitude': 'XLONG'}
try:
ds = xr.Dataset.rename(ds, variables)
except ValueError:
print(f'Variables {variables} cannot be renamed, '
f'those on the left are not in this dataset.')
# I need to manually convert the boundaries of the WRF domain into Plate Carree to set the limits.
# Get the raw map bounds using a wrf-python utility
raw_bounds = wrfpy.util.geo_bounds(ds)
# Get the projected bounds telling cartopy that the input coordinates are lat/lon (Plate Carree)
projected_bounds = cartopy_crs.transform_points(ccrs.PlateCarree(),
np.array([raw_bounds.bottom_left.lon, raw_bounds.top_right.lon]),
np.array([raw_bounds.bottom_left.lat, raw_bounds.top_right.lat]))
return projected_bounds
def conc_map(plot_var, cmap=cm.get_cmap('bwr'), figsize=(8,8), ax=None, cartopy_crs=None, proj_bounds=None,
vmin=-1, vmax=1, cbar_args={}, savefig=False, figpath='conc_map.png'):
"""
Creates a filled colormap across the full domain in the native (Lambert
Conformal) map projection.
Parameters
----------
:param plot_var: `xarray.DataArray`
Array containing the variable you want to plot along with "latitude"
and "longitude" coordinates.
:param cmap: `matplotlib.colors.Colormap`
Colormap for the pcolormesh.
:param figsize: tuple
Desired figure size.
:param ax: `matplotlib.pyplot.axis`
Existing axis -- if you have one -- on which to make the plot.
:param cartopy_crs: `cartopy.crs.CRS`
Cartopy coordinate reference system.
:param proj_bounds: array-like
Domain boundaries projected into the `cartopy_crs`.
:param vmin: float
Minimum value displayed on the colorbar.
:param vmax: float
Maximum value displayed on the colorbar.
:param cbar_args: dict
Additional keyword arguments that will be passed to
`matplotlib.pyplot.colorbar`.
:param savefig: bool
Option to save the plot.
:param figpath: string
If you choose to save the figure, this parameter controls
the output figure's name and type.
"""
if ax is None:
# Create a figure
fig = plt.figure(figsize=figsize)
# Set the GeoAxes to the projection used by WRF
ax = fig.add_subplot(1, 1, 1, projection=cartopy_crs)
# Normalize the values, so that the colorbar plots correctly
norm = colors.Normalize(vmin=vmin, vmax=vmax)
# Create the pcolormesh
cn = ax.pcolormesh(wrfpy.to_np(plot_var.longitude), wrfpy.to_np(plot_var.latitude), wrfpy.to_np(plot_var),
transform=ccrs.PlateCarree(),
cmap=cmap,
norm=norm,
)
if proj_bounds is not None:
# Format the projected bounds so they can be used in the xlim and ylim attributes
proj_xbounds = [proj_bounds[0, 0], proj_bounds[1, 0]]
proj_ybounds = [proj_bounds[0, 1], proj_bounds[1, 1]]
# Finally, set the x and y limits
ax.set_xlim(proj_xbounds)
ax.set_ylim(proj_ybounds)
# Download and add the states, coastlines, and lakes
shapename = 'admin_1_states_provinces_lakes'
states_shp = shpreader.natural_earth(resolution='10m',
category='cultural',
name=shapename)
# Add features to the maps
ax.add_geometries(
shpreader.Reader(states_shp).geometries(),
ccrs.PlateCarree(),
facecolor='none',
linewidth=.5,
edgecolor="black"
)
# Add features to the maps
# ax.add_feature(cfeature.LAKES)
# ax.add_feature(cfeature.OCEAN)
# Add color bars
if "cbar_ticks" not in cbar_args:
cbar_args["cbar_ticks"] = None
if "cbar_label" not in cbar_args:
cbar_args["cbar_label"] = 'Concentration'
if "shrink" not in cbar_args:
cbar_args["shrink"] = 1
if "pad" not in cbar_args:
cbar_args["pad"] = 0.05
cbar = plt.colorbar(cn,
ax=ax,
ticks=cbar_args["cbar_ticks"],
label=cbar_args["cbar_label"],
shrink=cbar_args["shrink"],
pad=cbar_args["pad"]
)
# Save the figure(s)
if savefig:
plt.savefig(figpath, dpi=300, transparent=True, bbox_inches='tight')
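# Usage sketch for the helpers above, assuming a CMAQ file opened so that it
# carries a `proj4_srs` attribute and latitude/longitude coordinates (e.g. via
# monet/xarray); the file name and species variable below are hypothetical.
#
#   ds = xr.open_dataset('cmaq_conc_example.nc')
#   crs = get_proj(ds)
#   bounds = get_domain_boundary(ds, crs)
#   conc_map(ds['PM25_TOT'].isel(TSTEP=0, LAY=0),
#            cartopy_crs=crs, proj_bounds=bounds,
#            vmin=0, vmax=20, cbar_args={'cbar_label': 'PM2.5 (ug/m3)'})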
def pollution_plot(da, vmin=0, vmax=12, cmap=cm.get_cmap('YlOrBr'),
extent=None, cbar_label='PM$_{2.5}$ ($\mu g/m^{3}$)', figsize=(15,7),
titlestr='Title', savefig=False, figpath='./pollution_plot.png'):
"""
Creates a filled colormap using the Plate Carree projection across a
user-defined section of the domain (defaults to the full domain) using
the monet package paradigm.
Parameters
----------
:param da: `xarray.DataArray`
Array containing the variable you want to plot along with "latitude"
and "longitude" coordinates.
:param vmin: float
Minimum value displayed on the colorbar.
:param vmax: float
Maximum value displayed on the colorbar.
:param cmap: `matplotlib.colors.Colormap`
Colormap for the plot.
:param extent: list
Plot boundaries in the format [{x_min}, {x_max}, {y_min}, {y_max}].
:param cbar_label: string
Label for the colorbar.
:param figsize: tuple
Desired figure size.
:param titlestr: string
Plot title.
:param savefig: bool
Option to save the plot.
:param figpath: string
If you choose to save the figure, this parameter controls
the output figure's name and type.
"""
if extent is None:
extent = [da.longitude.min(), da.longitude.max(), da.latitude.min(), da.latitude.max() - 2]
ax = m.plots.draw_map(states=True, resolution='10m', linewidth=0.5, figsize=figsize, extent=extent, subplot_kw={'projection': ccrs.PlateCarree()})
p = da.plot(x='longitude', y='latitude', ax=ax, robust=True,
vmin=vmin, vmax=vmax, cmap=cmap,
cbar_kwargs={'label': cbar_label,
'extend': 'neither'},
)
if titlestr is not None:
ax.set_title(titlestr)
if savefig:
plt.savefig(figpath, dpi=300, transparent=True, bbox_inches='tight')
else:
plt.show()
def conc_compare(da1, da2, extent=None,
vmin1=0, vmax1=10, vmin2=-1, vmax2=1, cmap1=cm.get_cmap('YlOrBr'), cmap2=cm.get_cmap('bwr'),
cbar_label1='PM$_{2.5}$ ($\mu g/m^{3}$)', cbar_label2='PM$_{2.5}$ Difference (%)',
titlestr1=None, titlestr2=None,
figsize=(15,7), savefig=False, figpath1='./conc_compare1.png', figpath2='./conc_compare2.png'):
"""
Creates two filled colormaps for concentration comparisons.
Parameters
----------
:param da1: `xarray.DataArray`
Array containing the variable you want to plot first along with "latitude"
and "longitude" coordinates.
:param da2: `xarray.DataArray`
Array containing the variable you want to plot second along with "latitude"
and "longitude" coordinates.
:param extent: list
Plot boundaries in the format [{x_min}, {x_max}, {y_min}, {y_max}].
:param vmin1: float
Minimum value displayed on the colorbar for plot 1.
:param vmax1: float
Maximum value displayed on the colorbar for plot 1.
:param vmin2: float
Minimum value displayed on the colorbar for plot 2.
:param vmax2: float
Maximum value displayed on the colorbar for plot 2.
:param cmap1: `matplotlib.colors.Colormap`
Colormap for plot 1.
:param cmap2: `matplotlib.colors.Colormap`
Colormap for plot 2.
:param cbar_label1: string
Label for colorbar 1.
:param cbar_label2: string
Label for colorbar 2.
:param figsize: tuple
Desired figure size.
:param titlestr1: string
Plot 1 title.
:param titlestr2: string
Plot 2 title.
:param savefig: bool
Option to save the plot.
:param figpath1: string
If you choose to save figures, this parameter controls
figure 1's name and type.
:param figpath2: string
If you choose to save the figures, this parameter controls
figure 2's name and type.
"""
# f, (ax1, ax2) = plt.subplots(nrows=2, ncols=1, figsize=fsize)
if extent is None:
extent = [da1.longitude.min(), da1.longitude.max(), da1.latitude.min(), da1.latitude.max() - 2]
f1, ax1 = m.plots.draw_map(states=True, resolution='10m', figsize=figsize, linewidth=0.5, extent=extent, subplot_kw={'projection': ccrs.PlateCarree()}, return_fig=True)
f2, ax2 = m.plots.draw_map(states=True, resolution='10m', figsize=figsize, linewidth=0.5, extent=extent, subplot_kw={'projection': ccrs.PlateCarree()}, return_fig=True)
# f.axes.append(ax2)
da1.plot(x='longitude', y='latitude', ax=ax1, robust=True,
vmin=vmin1, vmax=vmax1, cmap=cmap1,
cbar_kwargs={'label': cbar_label1, 'extend': 'neither'},
)
da2.plot(x='longitude', y='latitude', ax=ax2, robust=True,
vmin=vmin2, vmax=vmax2, cmap=cmap2,
cbar_kwargs={'label': cbar_label2, 'extend': 'neither'},
)
if titlestr1 is not None:
ax1.set_title(titlestr1)
if titlestr2 is not None:
ax2.set_title(titlestr2)
if savefig:
f1.savefig(figpath1, dpi=300, transparent=True, bbox_inches='tight')
f2.savefig(figpath2, dpi=300, transparent=True, bbox_inches='tight')
else:
f1.show()
f2.show()
def prof_change(gen_idx, gen_df1, gen_df2, date=None, column_names=['Base Case', 'w/ Renewables'],
figsize=(7,3), colors=['purple','orange'], linewidth=2, linestyles=['-','-.'],
titlestr1='', ylabelstr='Power (MW)',
savefig=False, outfile_pfix='../cmaqpy/data/plots/gen_profs_'):
"""
Plots changes in generation or emissions profiles for a user-specified unit.
Parameters
----------
:param gen_idx: int
Index for the generator that you want to plot.
:param gen_df1: `pandas.DataFrame`
DataFrame containing the generator profiles for case 1.
:param gen_df2: `pandas.DataFrame`
DataFrame containing the generator profiles for case 2.
:param date: string
Date for generator profile plotting. Defaults to None,
and plots the entire available time period.
:param column_names: list of strings
Names for case 1 and case 2. These will be written to the
plot legend.
:param figsize: tuple
Desired figure size.
:param colors: list of strings
Colors for case 1 and case 2 plots.
:param linewidth: float
Width of lines on plots.
:param linestyles: list of strings
Style of lines for case 1 and case 2.
:param titlestr1: string
Plot title.
:param ylabelstr: string
Label for y-axis.
:param savefig: bool
Option to save the plot.
:param outfile_pfix: string
If you choose to save the figure, this parameter controls
the output figure's name. The generator name will be appended.
"""
if date is None:
change_df1 = pd.concat([gen_df1.iloc[gen_idx,5:], gen_df2.iloc[gen_idx,5:]], axis=1)
else:
change_df1 = pd.concat([gen_df1.loc[gen_idx, pd.Timestamp(f'{date} 00')
"""
generic
"""
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import sys
import numpy as np
import pandas as pd
from sklearn.ensemble import ExtraTreesRegressor, GradientBoostingRegressor
from scipy import stats
from ..util import timeout, TimeoutError
def get_regressor(x, y, n_estimators=1500, n_tries=5,
verbose=False):
"""Calculate an ExtraTreesRegressor on predictor and target variables
Parameters
----------
x : numpy.array
Predictor vector
y : numpy.array
Target vector
n_estimators : int, optional
Number of estimators to use
n_tries : int, optional
Number of attempts to calculate regression
verbose : bool, optional
If True, output progress statements
Returns
-------
classifier : sklearn.ensemble.ExtraTreesRegressor
The classifier with the highest out of bag scores of all the
attempted "tries"
oob_scores : numpy.array
Out of bag scores of the classifier
"""
if verbose:
sys.stderr.write('Getting regressor\n')
clfs = []
oob_scores = []
for i in range(n_tries):
if verbose:
sys.stderr.write('%d.' % i)
clf = ExtraTreesRegressor(n_estimators=n_estimators, oob_score=True,
bootstrap=True, max_features='sqrt',
n_jobs=1, random_state=i).fit(x, y)
clfs.append(clf)
oob_scores.append(clf.oob_score_)
clf = clfs[np.argmax(oob_scores)]
clf.feature_importances = pd.Series(clf.feature_importances_,
index=x.columns)
return clf, oob_scores
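# Usage sketch, assuming `x` is passed as a DataFrame so that the feature
# importances can be indexed by column name; the synthetic data and names
# below are illustrative only.
#
#   X_demo = pd.DataFrame(np.random.randn(200, 5), columns=list('abcde'))
#   y_demo = X_demo['a'] * 2 + 0.1 * np.random.randn(200)
#   clf, oob = get_regressor(X_demo, y_demo, n_estimators=100, n_tries=2)
#   print(clf.feature_importances.sort_values(ascending=False))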
def get_boosting_regressor(x, y, verbose=False):
"""Calculate a GradientBoostingRegressor on predictor and target variables
Parameters
----------
x : numpy.array
Predictor variable
y : numpy.array
Target variable
verbose : bool, optional
If True, output status messages
Returns
-------
classifier : sklearn.ensemble.GradientBoostingRegressor
A fitted classifier of the predictor and target variable
"""
if verbose:
sys.stderr.write('Getting boosting regressor\n')
clf = GradientBoostingRegressor(n_estimators=50, subsample=0.6,
max_features=100,
verbose=0, learning_rate=0.1,
random_state=0).fit(x, y)
clf.feature_importances = pd.Series(clf.feature_importances_,
index=x.columns)
if verbose:
sys.stderr.write('Finished boosting regressor\n')
return clf
def get_unstarted_events(mongodb):
"""
get events that have not been started yet.
generator sets started to True before returning an event
Parameters
----------
mongodb : pymongo.Database
A MongoDB database object
"""
go_on = True
while go_on:
event = mongodb['list'].find_one({"started": False})
if event is None:
go_on = False
else:
event['started'] = True
mongodb['list'].save(event)
yield event
@timeout(5) # because these sometimes hang
def get_slope(x, y):
"""Get the linear regression slope of x and y
Parameters
----------
x : numpy.array
X-values of data
y : numpy.array
Y-values of data
Returns
-------
slope : float
Scipy.stats.linregress slope
"""
return stats.linregress(x, y)[0]
@timeout(5) # because these sometimes hang
def do_r(s_1, s_2, method=stats.pearsonr, min_items=12):
"""Calculate correlation ("R-value") between two vectors
Parameters
----------
s_1 : pandas.Series
Predictor vector
s_2 : pandas.Series
Target vector
method : function, optional
Which correlation method to use. (default scipy.stats.pearsonr)
min_items : int, optional
Minimum number of items occurring in both s_1 and s_2 (default 12)
Returns
-------
r_value : float
R-value of the correlation, i.e. how correlated the two inputs are
p_value : float
p-value of the correlation, i.e. how likely this correlation would
happen given the null hypothesis that the two are not correlated
Notes
-----
If too few items overlap, return (np.nan, np.nan)
"""
s_1, s_2 = s_1.dropna().align(s_2.dropna(), join='inner')
if len(s_1) <= min_items:
return np.nan, np.nan
return method(s_1, s_2)
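# Usage sketch: do_r aligns the two series on their shared index before
# computing the correlation, so partially overlapping inputs are fine as long
# as more than `min_items` points remain; the data below is illustrative only.
#
#   a = pd.Series(np.random.randn(30))
#   b = 0.5 * a + 0.1 * pd.Series(np.random.randn(30))
#   r, p = do_r(a, b)                              # Pearson by default
#   rho, p_s = do_r(a, b, method=stats.spearmanr)  # rank correlation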
@timeout(10) # because these sometimes hang
def get_robust_values(x, y):
"""Calculate robust linear regression
Parameters
----------
x : numpy.array
Predictor vector
y : numpy.array
Target vector
Returns
-------
intercept : float
Intercept of the fitted line
slope : float
Slope of the fitted line
t_statistic : float
T-statistic of the fit
p_value : float
p-value of the fit
"""
import statsmodels.api as sm
r = sm.RLM(y, sm.add_constant(x), missing='drop').fit()
results = r.params[0], r.params[1], r.tvalues[0], r.pvalues[0]
return results
@timeout(5)
def get_dcor(x, y):
"""Calculate distance correlation between two vectors
Uses the distance correlation package from:
https://github.com/andrewdyates/dcor
Parameters
----------
x : numpy.array
1-dimensional array (aka a vector) of the independent, predictor
variable
y : numpy.array
1-dimensional array (aka a vector) of the dependent, target variable
Returns
-------
dc : float
Distance covariance
dr : float
Distance correlation
dvx : float
Distance variance on x
dvy : float
Distance variance on y
"""
# cython version of dcor
try:
import dcor_cpy as dcor
except ImportError as e:
sys.stderr.write("Please install dcor_cpy.")
raise e
dc, dr, dvx, dvy = dcor.dcov_all(x, y)
return dc, dr, dvx, dvy
@timeout(100)
def apply_calc_rs(X, y, method=stats.pearsonr):
"""Apply R calculation method on each column of X versus the values of y
Parameters
----------
X : pandas.DataFrame
A (n_samples, n_features) sized DataFrame, assumed to be of
log-normal expression values
y : pandas.Series
A (n_samples,) sized Series, assumed to be of percent spliced-in
alternative splicing scores
method : function, optional
Which correlation method to use on each feature in X versus the
values in y
Returns
-------
r_coefficients : pandas.Series
Correlation coefficients
p_values : pandas.Series
Correlation significances (smaller is better)
See Also
--------
do_r
This is the underlying function which calculates correlation
"""
out_R = pd.Series(index=X.columns, name=y.name)
out_P = pd.Series(index=X.columns, name=y.name)
for this_id, data in X.iteritems():
x = pd.Series(data, name=this_id)
try:
r, p = do_r(x, y, method=method)
except TimeoutError:
sys.stderr.write(
"%s r timeout event:%s, gene:%s\n" % (method, y.name, x.name))
r, p = np.nan, np.nan
out_R.ix[this_id] = r
out_P.ix[this_id] = p
return out_R, out_P
@timeout(220)
def apply_calc_robust(X, y, verbose=False):
"""Calculate robust regression between the columns of X and y
Parameters
----------
X : pandas.DataFrame
A (n_samples, n_features) Dataframe of the predictor variable
y : pandas.DataFrame
A (n_samples, m_features) DataFrame of the response variable
verbose : bool, optional
If True, output status messages as the calculation is happening
Returns
-------
out_I : pandas.Series
Intercept of regressions
out_S : pandas.Series
Slope of regressions
out_T : pandas.Series
t-statistic of regressions
out_P : pandas.Series
p-values of regressions
See Also
--------
get_robust_values
This is the underlying function which calculates the slope,
intercept, t-value, and p-value of the fit
"""
if verbose:
sys.stderr.write("getting robust regression\n")
out_I = pd.Series(index=X.columns, name=y.name) # intercept
out_S = pd.Series(index=X.columns, name=y.name) # slope
out_T = pd.Series(index=X.columns, name=y.name)
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import operator
from itertools import product, starmap
from numpy import nan, inf
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, isnull, bdate_range,
NaT, date_range, timedelta_range,
_np_version_under1p8)
from pandas.tseries.index import Timestamp
from pandas.tseries.tdi import Timedelta
import pandas.core.nanops as nanops
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesOperators(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_comparisons(self):
left = np.random.randn(10)
right = np.random.randn(10)
left[:3] = np.nan
result = nanops.nangt(left, right)
with np.errstate(invalid='ignore'):
expected = (left > right).astype('O')
expected[:3] = np.nan
assert_almost_equal(result, expected)
s = Series(['a', 'b', 'c'])
s2 = Series([False, True, False])
# it works!
exp = Series([False, False, False])
tm.assert_series_equal(s == s2, exp)
tm.assert_series_equal(s2 == s, exp)
def test_op_method(self):
def check(series, other, check_reverse=False):
simple_ops = ['add', 'sub', 'mul', 'floordiv', 'truediv', 'pow']
if not compat.PY3:
simple_ops.append('div')
for opname in simple_ops:
op = getattr(Series, opname)
if op == 'div':
alt = operator.truediv
else:
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
check(self.ts, self.ts * 2)
check(self.ts, self.ts[::2])
check(self.ts, 5, check_reverse=True)
check(tm.makeFloatSeries(), tm.makeFloatSeries(), check_reverse=True)
def test_neg(self):
assert_series_equal(-self.series, -1 * self.series)
def test_invert(self):
assert_series_equal(-(self.series < 0), ~(self.series < 0))
def test_div(self):
with np.errstate(all='ignore'):
# no longer do integer div for any ops, but deal with the 0's
p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = p['first'] / p['second']
expected = Series(
p['first'].values.astype(float) / p['second'].values,
dtype='float64')
expected.iloc[0:3] = np.inf
assert_series_equal(result, expected)
result = p['first'] / 0
expected = Series(np.inf, index=p.index, name='first')
assert_series_equal(result, expected)
p = p.astype('float64')
result = p['first'] / p['second']
expected = Series(p['first'].values / p['second'].values)
assert_series_equal(result, expected)
p = DataFrame({'first': [3, 4, 5, 8], 'second': [1, 1, 1, 1]})
result = p['first'] / p['second']
assert_series_equal(result, p['first'].astype('float64'),
check_names=False)
self.assertTrue(result.name is None)
self.assertFalse(np.array_equal(result, p['second'] / p['first']))
# inf signing
s = Series([np.nan, 1., -1.])
result = s / 0
expected = Series([np.nan, np.inf, -np.inf])
assert_series_equal(result, expected)
# float/integer issue
# GH 7785
p = DataFrame({'first': (1, 0), 'second': (-0.01, -0.02)})
expected = Series([-0.01, -np.inf])
result = p['second'].div(p['first'])
assert_series_equal(result, expected, check_names=False)
result = p['second'] / p['first']
assert_series_equal(result, expected)
# GH 9144
s = Series([-1, 0, 1])
result = 0 / s
expected = Series([0.0, nan, 0.0])
assert_series_equal(result, expected)
result = s / 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
result = s // 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
def test_operators(self):
def _check_op(series, other, op, pos_only=False,
check_dtype=True):
left = np.abs(series) if pos_only else series
right = np.abs(other) if pos_only else other
cython_or_numpy = op(left, right)
python = left.combine(right, op)
tm.assert_series_equal(cython_or_numpy, python,
check_dtype=check_dtype)
def check(series, other):
simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod']
for opname in simple_ops:
_check_op(series, other, getattr(operator, opname))
_check_op(series, other, operator.pow, pos_only=True)
_check_op(series, other, lambda x, y: operator.add(y, x))
_check_op(series, other, lambda x, y: operator.sub(y, x))
_check_op(series, other, lambda x, y: operator.truediv(y, x))
_check_op(series, other, lambda x, y: operator.floordiv(y, x))
_check_op(series, other, lambda x, y: operator.mul(y, x))
_check_op(series, other, lambda x, y: operator.pow(y, x),
pos_only=True)
_check_op(series, other, lambda x, y: operator.mod(y, x))
check(self.ts, self.ts * 2)
check(self.ts, self.ts * 0)
check(self.ts, self.ts[::2])
check(self.ts, 5)
def check_comparators(series, other, check_dtype=True):
_check_op(series, other, operator.gt, check_dtype=check_dtype)
_check_op(series, other, operator.ge, check_dtype=check_dtype)
_check_op(series, other, operator.eq, check_dtype=check_dtype)
_check_op(series, other, operator.lt, check_dtype=check_dtype)
_check_op(series, other, operator.le, check_dtype=check_dtype)
check_comparators(self.ts, 5)
check_comparators(self.ts, self.ts + 1, check_dtype=False)
def test_operators_empty_int_corner(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({'x': 0.})
tm.assert_series_equal(s1 * s2, Series([np.nan], index=['x']))
def test_operators_timedelta64(self):
# invalid ops
self.assertRaises(Exception, self.objSeries.__add__, 1)
self.assertRaises(Exception, self.objSeries.__add__,
np.array(1, dtype=np.int64))
self.assertRaises(Exception, self.objSeries.__sub__, 1)
self.assertRaises(Exception, self.objSeries.__sub__,
np.array(1, dtype=np.int64))
# series ops
v1 = date_range('2012-1-1', periods=3, freq='D')
v2 = date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24,
rs.index).astype('int64').astype('timedelta64[ns]')
assert_series_equal(rs, xp)
self.assertEqual(rs.dtype, 'timedelta64[ns]')
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
self.assertEqual(td.dtype, 'timedelta64[ns]')
# series on the rhs
result = df['A'] - df['A'].shift()
self.assertEqual(result.dtype, 'timedelta64[ns]')
result = df['A'] + td
self.assertEqual(result.dtype, 'M8[ns]')
# scalar Timestamp on rhs
maxa = df['A'].max()
tm.assertIsInstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
self.assertEqual(resultb.dtype, 'timedelta64[ns]')
# timestamp on lhs
result = resultb + df['A']
values = [Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')]
expected = Series(values, name='A')
assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series(
[timedelta(days=4017 + i) for i in range(3)], name='A')
assert_series_equal(result, expected)
self.assertEqual(result.dtype, 'm8[ns]')
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
self.assertEqual(resulta.dtype, 'm8[ns]')
# roundtrip
resultb = resulta + d
assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(resultb, df['A'])
self.assertEqual(resultb.dtype, 'M8[ns]')
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(df['A'], resultb)
self.assertEqual(resultb.dtype, 'M8[ns]')
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
self.assertEqual(rs[2], value)
def test_operator_series_comparison_zerorank(self):
# GH 13006
result = np.float64(0) > pd.Series([1, 2, 3])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
result = pd.Series([1, 2, 3]) < np.float64(0)
expected = pd.Series([1, 2, 3]) < 0.0
self.assert_series_equal(result, expected)
result = np.array([0, 1, 2])[0] > pd.Series([0, 1, 2])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
def test_timedeltas_with_DateOffset(self):
# GH 4532
# operate with pd.offsets
s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')])
result = s + pd.offsets.Second(5)
result2 = pd.offsets.Second(5)
import re
from datetime import datetime, timedelta
import numpy as np
import pandas.compat as compat
import pandas as pd
from pandas.compat import u, StringIO
from pandas.core.base import FrozenList, FrozenNDArray, DatetimeIndexOpsMixin
from pandas.util.testing import assertRaisesRegexp, assert_isinstance
from pandas import Series, Index, Int64Index, DatetimeIndex, PeriodIndex
from pandas import _np_version_under1p7
import pandas.tslib as tslib
import nose
import pandas.util.testing as tm
class CheckStringMixin(object):
def test_string_methods_dont_fail(self):
repr(self.container)
str(self.container)
bytes(self.container)
if not compat.PY3:
unicode(self.container)
def test_tricky_container(self):
if not hasattr(self, 'unicode_container'):
raise nose.SkipTest('Need unicode_container to test with this')
repr(self.unicode_container)
str(self.unicode_container)
bytes(self.unicode_container)
if not compat.PY3:
unicode(self.unicode_container)
class CheckImmutable(object):
mutable_regex = re.compile('does not support mutable operations')
def check_mutable_error(self, *args, **kwargs):
# pass whatever functions you normally would to assertRaises (after the Exception kind)
assertRaisesRegexp(TypeError, self.mutable_regex, *args, **kwargs)
def test_no_mutable_funcs(self):
def setitem(): self.container[0] = 5
self.check_mutable_error(setitem)
def setslice(): self.container[1:2] = 3
self.check_mutable_error(setslice)
def delitem(): del self.container[0]
self.check_mutable_error(delitem)
def delslice(): del self.container[0:3]
self.check_mutable_error(delslice)
mutable_methods = getattr(self, "mutable_methods", [])
for meth in mutable_methods:
self.check_mutable_error(getattr(self.container, meth))
def test_slicing_maintains_type(self):
result = self.container[1:2]
expected = self.lst[1:2]
self.check_result(result, expected)
def check_result(self, result, expected, klass=None):
klass = klass or self.klass
assert_isinstance(result, klass)
self.assertEqual(result, expected)
class TestFrozenList(CheckImmutable, CheckStringMixin, tm.TestCase):
mutable_methods = ('extend', 'pop', 'remove', 'insert')
unicode_container = FrozenList([u("\u05d0"), u("\u05d1"), "c"])
def setUp(self):
self.lst = [1, 2, 3, 4, 5]
self.container = FrozenList(self.lst)
self.klass = FrozenList
def test_add(self):
result = self.container + (1, 2, 3)
expected = FrozenList(self.lst + [1, 2, 3])
self.check_result(result, expected)
result = (1, 2, 3) + self.container
expected = FrozenList([1, 2, 3] + self.lst)
self.check_result(result, expected)
def test_inplace(self):
q = r = self.container
q += [5]
self.check_result(q, self.lst + [5])
# other shouldn't be mutated
self.check_result(r, self.lst)
class TestFrozenNDArray(CheckImmutable, CheckStringMixin, tm.TestCase):
mutable_methods = ('put', 'itemset', 'fill')
unicode_container = FrozenNDArray([u("\u05d0"), u("\u05d1"), "c"])
def setUp(self):
self.lst = [3, 5, 7, -2]
self.container = FrozenNDArray(self.lst)
self.klass = FrozenNDArray
def test_shallow_copying(self):
original = self.container.copy()
assert_isinstance(self.container.view(), FrozenNDArray)
self.assertFalse(isinstance(self.container.view(np.ndarray), FrozenNDArray))
self.assertIsNot(self.container.view(), self.container)
self.assert_numpy_array_equal(self.container, original)
# shallow copy should be the same too
assert_isinstance(self.container._shallow_copy(), FrozenNDArray)
# setting should not be allowed
def testit(container): container[0] = 16
self.check_mutable_error(testit, self.container)
def test_values(self):
original = self.container.view(np.ndarray).copy()
n = original[0] + 15
vals = self.container.values()
self.assert_numpy_array_equal(original, vals)
self.assertIsNot(original, vals)
vals[0] = n
self.assert_numpy_array_equal(self.container, original)
self.assertEqual(vals[0], n)
class Ops(tm.TestCase):
def setUp(self):
self.int_index = tm.makeIntIndex(10)
self.float_index = tm.makeFloatIndex(10)
self.dt_index = tm.makeDateIndex(10)
self.dt_tz_index = tm.makeDateIndex(10).tz_localize(tz='US/Eastern')
self.period_index = tm.makePeriodIndex(10)
self.string_index = tm.makeStringIndex(10)
arr = np.random.randn(10)
self.int_series = Series(arr, index=self.int_index)
self.float_series = Series(arr, index=self.int_index)
self.dt_series = Series(arr, index=self.dt_index)
self.dt_tz_series = self.dt_tz_index.to_series(keep_tz=True)
self.period_series = Series(arr, index=self.period_index)
self.string_series = Series(arr, index=self.string_index)
types = ['int','float','dt', 'dt_tz', 'period','string']
self.objs = [ getattr(self,"{0}_{1}".format(t,f)) for t in types for f in ['index','series'] ]
def check_ops_properties(self, props, filter=None, ignore_failures=False):
for op in props:
for o in self.is_valid_objs:
# if a filter, skip if it doesn't match
if filter is not None:
filt = o.index if isinstance(o, Series) else o
if not filter(filt):
continue
try:
if isinstance(o, Series):
expected = Series(getattr(o.index,op),index=o.index)
else:
expected = getattr(o,op)
except (AttributeError):
if ignore_failures:
continue
result = getattr(o,op)
# these could be series, arrays or scalars
if isinstance(result,Series) and isinstance(expected,Series):
tm.assert_series_equal(result,expected)
elif isinstance(result,Index) and isinstance(expected,Index):
tm.assert_index_equal(result,expected)
elif isinstance(result,np.ndarray) and isinstance(expected,np.ndarray):
self.assert_numpy_array_equal(result,expected)
else:
self.assertEqual(result, expected)
# freq raises AttributeError on an Int64Index because it's not defined
# we mostly care about Series here anyhow
if not ignore_failures:
for o in self.not_valid_objs:
# an object that is datetimelike will raise a TypeError, otherwise
# an AttributeError
if issubclass(type(o), DatetimeIndexOpsMixin):
self.assertRaises(TypeError, lambda : getattr(o,op))
else:
self.assertRaises(AttributeError, lambda : getattr(o,op))
class TestIndexOps(Ops):
def setUp(self):
super(TestIndexOps, self).setUp()
self.is_valid_objs = [ o for o in self.objs if o._allow_index_ops ]
self.not_valid_objs = [ o for o in self.objs if not o._allow_index_ops ]
def test_ops(self):
tm._skip_if_not_numpy17_friendly()
for op in ['max','min']:
for o in self.objs:
result = getattr(o,op)()
if not isinstance(o, PeriodIndex):
expected = getattr(o.values, op)()
else:
expected = pd.Period(ordinal=getattr(o.values, op)(), freq=o.freq)
try:
self.assertEqual(result, expected)
except ValueError:
# comparing tz-aware series with np.array results in ValueError
expected = expected.astype('M8[ns]').astype('int64')
self.assertEqual(result.value, expected)
def test_nanops(self):
# GH 7261
for op in ['max','min']:
for klass in [Index, Series]:
obj = klass([np.nan, 2.0])
self.assertEqual(getattr(obj, op)(), 2.0)
obj = klass([np.nan])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = klass([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = klass([pd.NaT, datetime(2011, 11, 1)])
# check DatetimeIndex monotonic path
self.assertEqual(getattr(obj, op)(), datetime(2011, 11, 1))
obj = klass([pd.NaT, datetime(2011, 11, 1), pd.NaT])
# check DatetimeIndex non-monotonic path
self.assertEqual(getattr(obj, op)(), datetime(2011, 11, 1))
def test_value_counts_unique_nunique(self):
for o in self.objs:
klass = type(o)
values = o.values
# create repeated values, 'n'th element is repeated by n+1 times
if isinstance(o, PeriodIndex):
# freq must be specified because repeat makes freq ambiguous
o = klass(np.repeat(values, range(1, len(o) + 1)), freq=o.freq)
else:
o = klass(np.repeat(values, range(1, len(o) + 1)))
expected_s = Series(range(10, 0, -1), index=values[::-1], dtype='int64')
tm.assert_series_equal(o.value_counts(), expected_s)
if isinstance(o, DatetimeIndex):
# DatetimeIndex.unique returns DatetimeIndex
self.assertTrue(o.unique().equals(klass(values)))
else:
self.assert_numpy_array_equal(o.unique(), values)
self.assertEqual(o.nunique(), len(np.unique(o.values)))
for null_obj in [np.nan, None]:
for o in self.objs:
klass = type(o)
values = o.values
if o.values.dtype == 'int64':
# skips int64 because it doesn't allow to include nan or None
continue
if o.values.dtype == 'datetime64[ns]' and _np_version_under1p7:
# Unable to assign None
continue
# special assign to the numpy array
if o.values.dtype == 'datetime64[ns]':
values[0:2] = pd.tslib.iNaT
else:
values[0:2] = null_obj
# create repeated values, 'n'th element is repeated by n+1 times
if isinstance(o, PeriodIndex):
o = klass(np.repeat(values, range(1, len(o) + 1)), freq=o.freq)
else:
o = klass(np.repeat(values, range(1, len(o) + 1)))
if isinstance(o, DatetimeIndex):
expected_s_na = Series(list(range(10, 2, -1)) + [3], index=values[9:0:-1])
expected_s = Series(list(range(10, 2, -1)), index=values[9:1:-1])
else:
expected_s_na = Series(list(range(10, 2, -1)) +[3], index=values[9:0:-1], dtype='int64')
expected_s = Series(list(range(10, 2, -1)), index=values[9:1:-1], dtype='int64')
tm.assert_series_equal(o.value_counts(dropna=False), expected_s_na)
tm.assert_series_equal(o.value_counts(), expected_s)
# numpy_array_equal cannot compare arrays includes nan
result = o.unique()
self.assert_numpy_array_equal(result[1:], values[2:])
if isinstance(o, DatetimeIndex):
self.assertTrue(result[0] is pd.NaT)
else:
self.assertTrue(pd.isnull(result[0]))
self.assertEqual(o.nunique(), 8)
self.assertEqual(o.nunique(dropna=False), 9)
def test_value_counts_inferred(self):
klasses = [Index, Series]
for klass in klasses:
s_values = ['a', 'b', 'b', 'b', 'b', 'c', 'd', 'd', 'a', 'a']
s = klass(s_values)
expected = Series([4, 3, 2, 1], index=['b', 'a', 'd', 'c'])
tm.assert_series_equal(s.value_counts(), expected)
self.assert_numpy_array_equal(s.unique(), np.unique(s_values))
self.assertEqual(s.nunique(), 4)
# don't sort, have to sort after the fact as not sorting is platform-dep
hist = s.value_counts(sort=False)
hist.sort()
expected = Series([3, 1, 4, 2], index=list('acbd'))
expected.sort()
tm.assert_series_equal(hist, expected)
# sort ascending
hist = s.value_counts(ascending=True)
expected = Series([1, 2, 3, 4], index=list('cdab'))
tm.assert_series_equal(hist, expected)
# relative histogram.
hist = s.value_counts(normalize=True)
expected = Series([.4, .3, .2, .1], index=['b', 'a', 'd', 'c'])
tm.assert_series_equal(hist, expected)
# bins
self.assertRaises(TypeError, lambda bins: s.value_counts(bins=bins), 1)
s1 = Series([1, 1, 2, 3])
res1 = s1.value_counts(bins=1)
exp1 = Series({0.998: 4})
tm.assert_series_equal(res1, exp1)
res1n = s1.value_counts(bins=1, normalize=True)
exp1n = Series({0.998: 1.0})
tm.assert_series_equal(res1n, exp1n)
self.assert_numpy_array_equal(s1.unique(), np.array([1, 2, 3]))
self.assertEqual(s1.nunique(), 3)
res4 = s1.value_counts(bins=4)
exp4 = Series({0.998: 2, 1.5: 1, 2.0: 0, 2.5: 1}, index=[0.998, 2.5, 1.5, 2.0])
tm.assert_series_equal(res4, exp4)
res4n = s1.value_counts(bins=4, normalize=True)
exp4n = Series({0.998: 0.5, 1.5: 0.25, 2.0: 0.0, 2.5: 0.25}, index=[0.998, 2.5, 1.5, 2.0])
tm.assert_series_equal(res4n, exp4n)
# handle NA's properly
s_values = ['a', 'b', 'b', 'b', np.nan, np.nan, 'd', 'd', 'a', 'a', 'b']
s = klass(s_values)
expected = Series([4, 3, 2], index=['b', 'a', 'd'])
tm.assert_series_equal(s.value_counts(), expected)
self.assert_numpy_array_equal(s.unique(), np.array(['a', 'b', np.nan, 'd'], dtype='O'))
self.assertEqual(s.nunique(), 3)
s = klass({})
expected = Series([], dtype=np.int64)
tm.assert_series_equal(s.value_counts(), expected)
self.assert_numpy_array_equal(s.unique(), np.array([]))
self.assertEqual(s.nunique(), 0)
# GH 3002, datetime64[ns]
txt = "\n".join(['xxyyzz20100101PIE', 'xxyyzz20100101GUM', 'xxyyzz20100101EGG',
'xxyyww20090101EGG', 'foofoo20080909PIE', 'foofoo20080909GUM'])
f = StringIO(txt)
df = pd.read_fwf(f, widths=[6, 8, 3], names=["person_id", "dt", "food"],
parse_dates=["dt"])
s = klass(df['dt'].copy())
idx = pd.to_datetime(['2010-01-01 00:00:00Z', '2008-09-09 00:00:00Z', '2009-01-01 00:00:00X'])
expected_s = Series([3, 2, 1], index=idx)
tm.assert_series_equal(s.value_counts(), expected_s)
expected = np.array(['2010-01-01 00:00:00Z', '2009-01-01 00:00:00Z', '2008-09-09 00:00:00Z'],
dtype='datetime64[ns]')
if isinstance(s, DatetimeIndex):
expected = DatetimeIndex(expected)
self.assertTrue(s.unique().equals(expected))
else:
self.assert_numpy_array_equal(s.unique(), expected)
self.assertEqual(s.nunique(), 3)
# with NaT
s = df['dt'].copy()
s = klass([v for v in s.values] + [pd.NaT])
result = s.value_counts()
self.assertEqual(result.index.dtype, 'datetime64[ns]')
tm.assert_series_equal(result, expected_s)
result = s.value_counts(dropna=False)
expected_s[pd.NaT] = 1
tm.assert_series_equal(result, expected_s)
unique = s.unique()
self.assertEqual(unique.dtype, 'datetime64[ns]')
# numpy_array_equal cannot compare pd.NaT
self.assert_numpy_array_equal(unique[:3], expected)
self.assertTrue(unique[3] is pd.NaT or unique[3].astype('int64') == pd.tslib.iNaT)
self.assertEqual(s.nunique(), 3)
self.assertEqual(s.nunique(dropna=False), 4)
# timedelta64[ns]
td = df.dt - df.dt + timedelta(1)
td = klass(td)
result = td.value_counts()
expected_s = Series([6], index=[86400000000000])
self.assertEqual(result.index.dtype, 'int64')
tm.assert_series_equal(result, expected_s)
#!/usr/bin/env python
# coding: utf-8
# # Experiments @Fischer in Montebelluna 28.02.20
# We had the opportunity to use Fischer's ski-boot Flexometer at Montebelluna, with their help. The idea is to validate our system by acquiring data simultaneously with our sensor setup and with their machine. Fischer's machine can apply exact loads.
# We used both of our sensorized ski boots (Dynafit Hoji Pro Tour W and Dynafit TLT Speedfit). We had already used the Hoji in the past for our experiments in the lab @Bz with our self-built test bench. For the TLT Speedfit this was the first experiment.
#
# Strain gauge setup:
# - Dynafit Hoji Pro Tour: 4 pairs of strain gauges 1-4 (a=0°, b=90°)
# - Dynafit TLT Speedfit: 4 triples of strain gauges 1-4 (a=0°,b=45°,c=90°)
# As our time was limited, we tested all 4 strain-gauge pairs of the Hoji but only strain-gauge triple 3 of the TLT Speedfit. This was the first experiment in which the new datalogger prototype was running, and also the first time it ran in battery mode and below room temperature. Unfortunately the connection of the strain gauges to the logging system was not ideal, since in battery mode we currently have no way to check the channel connections. We'll find a solution for this in the next days.
#
# Experiments (ambient temperature: 4°C):
# - #1: Hoji Pro Tour, 4a&b
# - #2: Hoji Pro Tour, 3a&b
# - #3: Hoji Pro Tour, 2a&b
# - #4: Hoji Pro Tour, 1a&b
# - #5: TLT Speedfit, 3a&b&c
#
# ATTENTION: The Hoji boot was not closed as tightly as the TLT. Take this into consideration when looking at the force/angular-displacement graph.
# In[50]:
# Importing libraries
import pandas as pd
import numpy as np
import datetime
import time
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
import csv
import matplotlib.patches as mpatches #needed for plot legend
from matplotlib.pyplot import *
get_ipython().run_line_magic('matplotlib', 'inline')
from IPython.display import set_matplotlib_formats
set_matplotlib_formats('png', 'pdf')
# # Machine Data: load and plot
# The boot was loaded cyclically by the machine with a maximum of F = 150 N. For each of experiments 1-5 we exported the data of the last 5 cycles.
#
# In[51]:
#Loading data into dfm[expnr]: expnr -> experiment 1-5, each with cycles 1-5
expnr=5 #number of exp
cyclenr = 5 #number of cycle per experiment
colnr = 2*cyclenr #
dfm={}
for expnr in range(expnr):
d = {}
for i in range(cyclenr): #load data from cycle 1-5
d[expnr,i] = pd.DataFrame()
d[expnr,i] = pd.read_csv('ESP'+ str(expnr+1) + 'ciclo'+ str(i+1) +'.csv', sep='\t',header=None)
dfm[expnr]=pd.concat([d[expnr,0], d[expnr,1], d[expnr,2], d[expnr,3], d[expnr,4]], axis=1, join='inner')
dfm[expnr] = np.array(dfm[expnr]) #transform in np.array
for i in range(len(dfm[expnr])): #replace , with . and change format to float
for j in range(colnr):
dfm[expnr][i,j]=float(dfm[expnr][i,j].replace(',', '.'))
#print(dfm[1][:,0])
# In[52]:
figm, axm = plt.subplots(5, 5, figsize=(13, 11), sharex='col') #define plot settings
col_title = ['Experiment {}'.format(col) for col in range(1, 5)]
for i in range(expnr+1):
for j in range(cyclenr):
axm[j,i].plot(dfm[i][:,2*j+1],dfm[i][:,2*j])
axm[0,i].set_title('Experiment '+ str(i+1))
axm[j,0].set(ylabel='F[N] Cycle'+ str(j+1))
axm[4,i].set(xlabel='angle [°]')
plt.tight_layout()
figm.suptitle('Machine Data Plot (Hoji Pro Tour: 1-4, TLT Speedfit: 5)',fontsize=16)
figm.subplots_adjust(top=0.88)
# On the x-axis the displacement angle alpha is shown and on the y-axis the force F (max 150 N).
# In the plot above the columns show the experiments and the rows the single cycles. The cycles within the same experiment are qualitatively quite similar. It's cool how clear the difference between the two ski boot models is: experiments 1-4 show the Dynafit Hoji Pro Tour and experiment 5 the Dynafit TLT Speedfit.
# # Calculate surface under curve
# To compare the energy release of the Hoji and the TLT we calculate the area enclosed by the load/unload curve.
# The area under a curve (curve to x-axis) is obtained by integration (E = \int M \, d\varphi). By interpolating points extracted from the curve we generate a function, which is then integrated with the trapezoidal rule to get the area. Subtracting the unloading area from the loading area gives the enclosed area, which corresponds to the energy release.
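# Quick sanity check of the trapezoidal rule used below (illustrative only):
# integrating y = x**2 on [0, 1] with numpy's trapz should give roughly 1/3;
# the loading/unloading areas are computed the same way, just with the
# interpolated boot-bench curves instead of an analytic function.
x_chk = np.linspace(0, 1, 101)
print(np.trapz(x_chk**2, x_chk))   # ~0.33335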
# In[53]:
from scipy.interpolate import interp1d
from numpy import trapz
# Experiment data
x1=dfm[1][:,1] # Exp1 cycle 1 Hoji
y1=dfm[1][:,0] # Exp1 cycle 1 Hoji
x2=dfm[4][:,1] # Exp5 cycle 1 TLT
y2=dfm[4][:,0] # Exp5 cycle 1 TLT
ym1=np.array([-29,17,41.14,63,96,147.8]) # force values (y), loading, Hoji
xm1=np.array([-1.5,2.9,7.312,11,13.7,13.94]) # angle values (x), loading, Hoji
ym2=np.array([-29,3.741,25,43.08,63,72,106,147.8]) # force values (y), unloading, Hoji
xm2=np.array([-1.5,-0.646,1.2,3.127,6.6,8.37,13.28,13.94]) # angle values (x), unloading, Hoji
ym3=np.array([-28.5,-12.27,4.841,18.01,31.92,39.46,87.48,145.6]) # force values (y), loading, TLT
xm3=np.array([-2.752,-0.989,1.022,3.23,5.387,6.012,6.521,6.915]) # angle values (x), loading, TLT
ym4=np.array([-28.5,2.042,26.35,41.36,51.86,56.33,93.87,145.6]) # force values (y), unloading, TLT
xm4=np.array([-2.752,-1.94,-0.43,1.524,3.76,5.625,6.24,6.915]) # angle values (x), unloading, TLT
# Interpolation
f1 = interp1d(xm1, ym1)
f2 = interp1d(xm2, ym2)
f3 = interp1d(xm3, ym3)
f4 = interp1d(xm4, ym4)
# Plot of original data and interpolation
fig0, ax0 = plt.subplots(1, 2, figsize=(15, 8))
fig0.suptitle('Ski boot testing machine', fontsize=16)
#fig0.suptitle('Interpolation of experiment data 1&5 cycle 1 (left: Hoji, right: TLT)', fontsize=16)
ax0[0].plot(x1,y1) # loading Hoji
ax0[0].set_title('Hoji Pro Tour W')
#ax0[0].plot(xm2,ym2, 'o', xm2, f2(xm2), '-', xm2, f2(xm2), '--') # unloading Hoji
#ax0[0].plot(x1,y1,xm1,ym1, 'o', xm1, f1(xm1), '-') # loading Hoji
#ax0[0].plot(xm2,ym2, 'o', xm2, f2(xm2), '-', xm2, f2(xm2), '--') # unloading Hoji
ax0[0].set(xlabel='angle [°]')
ax0[0].set(ylabel='Force [N]')
ax0[1].plot(x2,y2) # loading Hoji
ax0[1].set_title('TLT Speedfit')
#ax0[1].plot(x2,y2,xm3,ym3, 'o', xm3, f3(xm3), '-') # loading Hoji
#ax0[1].plot(xm4,ym4, 'o', xm4, f4(xm4), '-', xm4, f4(xm4), '--') # unloading Hoji
ax0[1].set(xlabel='angle [°]')
ax0[1].set(ylabel='Force [N]')
plt.show()
# Calculation of area between loading and unloading curve -> Energy
area1_hoji=np.trapz(f1(xm1), xm1)
area2_hoji=np.trapz(f2(xm2), xm2)
area1_TLT=np.trapz(f3(xm3), xm3)
area2_TLT=np.trapz(f4(xm4), xm4)
energy_hoji=abs(area1_hoji-area2_hoji)
energy_TLT=abs(area1_TLT-area2_TLT)
#print('Energy release Hoji = ', energy_hoji, '[J]')
#print('Energy release TLT = ', energy_TLT, '[J]')
# # Bootsensing: load and plot
# We created a datalogger which is saving the experiment data in a .txt file on a SD card. After the experiments we took them from the SD card to our PC.
# <NAME> did excellent work on his file reader (https://github.com/raphaFanti/multiSensor/blob/master/analysis/03.%20Experiments_200220/Analysis%20v02/datanalysis_200220-v02.ipynb), which I'm using here to load this data. I modified the col_names, since we used adapted column names this time, and updated the experiment date. He also implemented a good way to store everything in one big dataframe; I'll copy that code from Raphael as well.
# In[54]:
# transforms a time string into a datetime element
def toDate(timeString):
hh, mm, ss = timeString.split(":")
return datetime.datetime(2020, 2, 28, int(hh), int(mm), int(ss)) # date of experiment: 28.02.20
# returns a dataframe for each sub experient
col_names = ["ID","strain1","strain2","strain3","temp","millis"] # column names from file
cols_ordered = ["time","strain1","strain2","strain3"] # order wished
cols_int = ["strain1","strain2","strain3"] # to be transformed to int columns
def getDf(fl, startTime):
# ! note that we remove the first data line for each measurement since the timestamp remains zero for two first lines
fl.readline() # line removed
line = fl.readline()
lines = []
while "Time" not in line:
cleanLine = line.rstrip()
# trick for int since parsing entire column was not working
intsLine = cleanLine.replace(".00", "")
splitedLine = intsLine.split(",")
lines.append(splitedLine)
line = fl.readline()
# create dataframe
df = pd.DataFrame(lines, columns = col_names)
# create time colum
df["time"] = df["millis"].apply(lambda x: startTime + datetime.timedelta(milliseconds = int(x)))
# drop ID, millis and temperature, and order columns
df = df.drop(["ID", "temp", "millis"], axis = 1)
df = df[cols_ordered]
# adjust types
df[cols_int] = df[cols_int].astype(int)
return df
# Load data to dataframe. As we were not working with our usually experiment protocol, I had to skip phase = bs2.
# In[55]:
filenames = ["2022823_exp1","2022848_exp2","2022857_exp3", "202285_exp4", "2022829_exp5"]
nExp = len(filenames) # we simply calculate the number of experiments
# big data frame
df = pd.DataFrame()
for i, this_file in enumerate(filenames):
# experiment counter
exp = i + 1
# open file
with open(this_file + ".TXT", 'r') as fl:
# throw away first 3 lines and get baseline 1 start time
for i in range(3):
fl.readline()
# get start time for first baseline
bl1_time = fl.readline().replace("BASELINE Time: ", "")
startTime = toDate(bl1_time)
# get data for first baseline
df_bl1 = getDf(fl, startTime)
df_bl1["phase"] = "bl1"
# get start time for experiment
exp_time = fl.readline().replace("RECORDING Time: ", "")
startTime = toDate(exp_time)
# get data for experiment
df_exp = getDf(fl, startTime)
df_exp["phase"] = "exp"
# get start time for second baseline
#bl2_time = fl.readline().replace("BASELINE Time: ", "")
#startTime = toDate(bl2_time)
# get data for second baseline
#df_bl2 = getDf(fl, startTime)
#df_bl2["phase"] = "bl2"
# create full panda
df_exp_full = pd.concat([df_bl1, df_exp])
# create experiment column
df_exp_full["exp"] = exp
df = pd.concat([df, df_exp_full])
# shift columns exp and phase to begining
cols = list(df.columns)
cols = [cols[0]] + [cols[-1]] + [cols[-2]] + cols[1:-2]
df = df[cols]
#print(df)
# In[56]:
def plotExpLines(df, exp):
fig, ax = plt.subplots(3, 1, figsize=(15, 8), sharex='col')
fig.suptitle('Experiment ' + str(exp), fontsize=16)
# fig.subplots_adjust(top=0.88)
ax[0].plot(dfExp["time"], dfExp["strain3"], 'tab:green')
ax[0].set(ylabel='strain3')
ax[1].plot(dfExp["time"], dfExp["strain1"], 'tab:red')
ax[1].set(ylabel='strain1')
ax[2].plot(dfExp["time"], dfExp["strain2"], 'tab:blue')
ax[2].set(ylabel='strain2')
ax[2].set(xlabel='time [ms]')
plt.show()
# ### Experiment 1
# In[57]:
figure(num=None, figsize=(10, 8), dpi=80, facecolor='w', edgecolor='k')
plt.plot(df[df["exp"] == 1]['time'],df[df["exp"] == 1]['strain3'])
plt.xlabel('daytime')
plt.ylabel('4A')
plt.title('Experiment 1: 4A ')
plt.show()
# We applied 34 cycles.
# ### Experiment 2
# In[58]:
figure(num=None, figsize=(10, 8), dpi=80, facecolor='w', edgecolor='k')
plt.plot(df[df["exp"] == 2]['time'],df[df["exp"] == 2]['strain3'])
plt.xlabel('daytime')
plt.ylabel('3A')
plt.title('Experiment 2: 3A ')
plt.show()
# # Experiment 3
# In[59]:
figure(num=None, figsize=(10, 8), dpi=80, facecolor='w', edgecolor='k')
plt.plot(df[df["exp"] == 3]['time'],df[df["exp"] == 3]['strain3'])
plt.xlabel('daytime')
plt.ylabel('2B')
plt.title('Experiment 3: 2B ')
plt.show()
# ### Experiment 4
# In[60]:
figure(num=None, figsize=(12, 8), dpi=80, facecolor='w', edgecolor='k')
plt.plot(df[df["exp"] == 4]['time'],df[df["exp"] == 4]['strain3'])
plt.xlabel('daytime')
plt.ylabel('1A')
plt.title('Experiment 4: 1A ')
plt.show()
# ### Experiment 5
# In[61]:
fig, ax = plt.subplots(2, 1, figsize=(15, 8), sharex='col')
fig.suptitle('Experiment 5: 3B & 3C ', fontsize=16)
# fig.subplots_adjust(top=0.88)
ax[0].plot(df[df["exp"] == 5]['time'], df[df["exp"] == 5]['strain3'], 'tab:green')
ax[0].set(ylabel='3C')
ax[1].plot(df[df["exp"] == 5]['time'], df[df["exp"] == 5]['strain2'], 'tab:red')
ax[1].set(ylabel='3B')
ax[1].set(xlabel='daytime')
plt.show()
# In[62]:
#dfExp = df[df["exp"] == 3]
#plotExpLines(dfExp, 3)
# # Analysis
# Now we try to compare the data from the Flexometer of Fischer and from our Bootsensing.
# - Fischer: force F over displacement angle alpha
# - Bootsensing: deformation measured by strain gauges (resistance change), in a so-far uncalibrated unit, over time (daytime shown in the plots)
# The idea now is to identify the last 5 cycles in the Bootsensing data automatically and to extract the time information (t0, t). Afterwards this delta t can be applied to Fischer's data to plot the force F over the extracted time.
# ### Bootsensing: Cycle identification
# For Experiments 1-5 we will identify the last 5 cycles of strain3. As Fischer's data starts at a peak (maximum load), we also identify the peaks in our bootsensing data and extract the last 6 peak indexes. Applying these indices to the strain3/time data we get the last 5 cycles.
#
# Find peaks: find_peaks function https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.find_peaks.html
# Find valley: with Inverse of find peaks
#
#
# In[63]:
from scipy.signal import find_peaks
import numpy as np
# Load data of Experiments 1-5
ds={} # dict for strain data -> dataformat will be changed
dt={} # time data
peaks={} # peaks
valleys={} # valleys
inv_ds={} # inverse for valleys calculation
ds_peaks={} # index of peak (used for 5-2)
ds_peaks_end={} # index of last peaks
ds_valleys_end = {} # index of last valley
ds_valleys={} # index of valley (used for 5-2)
len_valley={} # valley length
for i in range(1,6): # i = Experiment number
ds[i]=df[df["exp"] == i]['strain3'] #data for strain3
dt[i]=df[df["exp"] == i]['time'] # time data
ds[i]=ds[i].dropna() # drop NaN
dt[i]=dt[i].dropna()
ds[i]=ds[i].reset_index(drop=True) #reset index
dt[i]=dt[i].reset_index(drop=True)
peaks[i],_=find_peaks(ds[i],prominence=100000) # find peaks
inv_ds[i]=ds[i]*(-1) # inverse of ds
valleys[i],_=find_peaks(inv_ds[i],prominence=10000) # find valleys
for j in range(1,6): # j = cycle number
ds_valleys[j,i]=valleys[i][-1-j:-j] # selecting last 5 valleys
ds_valleys_end[j,i]=valleys[i][-1:] # select last valley
ds_valleys[j,i]=ds_valleys[j,i][0] # assign index
ds_valleys_end[j,i]=ds_valleys_end[j,i][0]
ds_peaks[j,i]=peaks[i][-1-j:-j] # selecting last 5 peaks
ds_peaks_end[j,i]=peaks[i][-1:] # select last peak
ds_peaks[j,i]=ds_peaks[j,i][0] # assign index
ds_peaks_end[j,i]=ds_peaks_end[j,i][0]
#print(ds1[1][ds_valleys[1,1]])
#Calculate cycle lengths
#for i in range(1,6):
#len_valley[e] = dt1[e][ds_valleys[1,1]] - dt1[e][ds_valleys[2,1]] #1th
#len_valley1_2[i] = dt1[ds_valley_3[i]] - dt1[ds_valley_4[i]] #2th
#len_valley2_3[i] = dt1[ds_valley_2[i]] - dt1[ds_valley_3[i]] #3th
#len_valley3_4[i] = dt1[ds_valley_1[i]] - dt1[ds_valley_2[i]] #4th
#len_valley4_5[i] = dt1[ds_valley_last_end[i]] - dt1[ds_valley_1[i]] #5th
# EXPERIMENT 1: pay attention for peaks/valley after cycles
# Now we will plot the data for strain3 for each experiment with their peaks and valleys.
# In[64]:
# Plot peaks and valleys for Exp 1-5 for strain3
fig1, ax1 = plt.subplots(5, 1, figsize=(15, 8))
fig1.subplots_adjust(top=2)
fig1.suptitle('Experiments 1-5: peaks and valleys ', fontsize=16)
for i in range(5): # i for Experiment number
ax1[i].plot(df[df["exp"] == (i+1)]['time'], df[df["exp"] == (i+1)]['strain3'], 'tab:green')
ax1[i].plot(dt[(i+1)][peaks[(i+1)]],ds[(i+1)][peaks[(i+1)]],"x") #Plot peaks with x
ax1[i].plot(dt[(i+1)][valleys[(i+1)]],ds[(i+1)][valleys[(i+1)]],"o") #Plot valleys with o
ax1[i].set(ylabel='raw signal')
ax1[i].set(xlabel='daytime')
ax1[i].set_title('Experiment'+str(i+1))
plt.tight_layout()
fig1.subplots_adjust(top=0.88) # spacer between title and plot
plt.show()
# Plot last 5 cycles for Exp 1-5 for strain3
fig2, ax2 = plt.subplots(5, 1, figsize=(10, 8))
fig2.suptitle('Experiments 1-5: last 5 cycles ', fontsize=16)
for i in range(5): # i for Experiment number
ax2[i].plot(dt[(i+1)][ds_valleys[5,(i+1)]:ds_valleys_end[1,(i+1)]],ds[(i+1)][ds_valleys[5,(i+1)]:ds_valleys_end[1,(i+1)]]) # select data between 5th last and last valley
#ax2[i].plot(dt[(i+1)][ds_peaks[5,(i+1)]:ds_peaks_end[1,(i+1)]],ds[(i+1)][ds_peaks[5,(i+1)]:ds_peaks_end[1,(i+1)]])# select data between 5th last and last peak
ax2[i].set(ylabel='raw signal')
ax2[i].set(xlabel='daytime')
ax2[i].set_title('Experiment'+str(i+1))
plt.tight_layout()
fig2.subplots_adjust(top=0.88) # spacer between title and plot
plt.show()
#plt.axvline(x=dt[ds_valley_2_index],color="grey") #time borders 3th cycle
#plt.axvline(x=dt[ds_valley_3_index],color="grey")
#plt.axhline(y=ds[ds_valley_3_index],color="red") # h line
# For Experiments 2-5 the last 5 cycles are clear. The signal of Experiment 1 rises again after the cyclic loading, so it is not possible to select the last 5 cycles with this "peaks" method; fortunately we can still extract the last cycle.
# As we can see in the plot of the last 5 cycles above, the last cycle of Exp1, Exp3 and Exp5 ends with a peak, whereas Exp2 and Exp4 end with a valley. We can tell this from the plots because we know from the exported machine data that a cycle always ends at the maximum force of 150 N, which corresponds to a valley or a peak in our bootsensing signal.
# ### Match Fischer Data with Bootsensing cycle time
# Now we are going to match the Bootsensing cycle time with Fischer's force data for each experiment 1-5. As Fischer's machine applied the load with a frequency of 0.33 Hz, each cycle should last approximately t=3 s. We verified this by calculating the time between two neighbouring valleys in our bootsensing data (see code above).
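# Since the cycle-length calculation above is commented out, here is a minimal sketch of that check
# (an illustrative addition, not original code; it reuses the dt and valleys variables defined above):
# In[ ]:
for i in range(1, 6):
    valley_times = dt[i][valleys[i]].reset_index(drop=True)
    cycle_lengths = valley_times.diff().dropna()
    print('Experiment', i, 'median valley-to-valley spacing:', cycle_lengths.median())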
# In[65]:
#Identify frequency of Fischer data acquisition
f={} # Fischer force matrix
freq={} # matrix with vector lenght to identify frequency
for i in range(5): #
    f[i] = dfm[i][:,2*i] # load force data for experiment i+1 (force columns 0, 2, 4, 6, 8)
freq[i] = len(dfm[i][:,2*i]) # force vector len
#Create time linspace for Fischer data
#Timestamp can not be selected by item, done without manually
time_start1=dt[1][ds_peaks[5,1]] # Exp1: select manually last cycle
time_end1=dt[1][ds_peaks[4,1]]
time_start2=dt[2][ds_valleys[5,2]] # Exp2
time_end2=dt[2][ds_valleys[4,2]]
time_start3=dt[3][ds_peaks[5,3]] # Exp3
time_end3=dt[3][ds_peaks[4,3]]
time_start4=dt[4][ds_valleys[5,4]] # Exp4
time_end4=dt[4][ds_valleys[4,4]]
time_start5=dt[5][ds_peaks[5,5]] # Exp5
time_end5=dt[5][ds_peaks[4,5]]
#print(time_start1,time_end1)
x1=pd.date_range(time_start1, time_end1, periods=freq[0]).to_pydatetime()
x2=pd.date_range(time_start2, time_end2, periods=freq[1]).to_pydatetime()
x3=pd.date_range(time_start3, time_end3, periods=freq[2]).to_pydatetime()
x4=pd.date_range(time_start4, time_end4, periods=freq[3]).to_pydatetime()
x5=pd.date_range(time_start5, time_end5, periods=freq[4]).to_pydatetime()
#Plot Fischer Data in timerange x
fig3, ax3 = plt.subplots(5, 2, figsize=(12, 10))
fig3.suptitle('Experiments 1-5: Fischer F over Bootsensing daytime (left), Bootsensing cycle (right) ', fontsize=16)
ax3[0,0].plot(x1,f[0])
ax3[0,0].set(xlabel='daytime')
ax3[0,0].set(ylabel='F[N]')
ax3[0,0].set_title('Experiment 1')
ax3[1,0].plot(x2,f[1])
ax3[1,0].set(xlabel='daytime')
ax3[1,0].set(ylabel='F[N]')
ax3[1,0].set_title('Experiment 2')
ax3[2,0].plot(x3,f[2])
ax3[2,0].set(xlabel='daytime')
ax3[2,0].set(ylabel='F[N]')
ax3[2,0].set_title('Experiment 3')
ax3[3,0].plot(x4,f[3])
ax3[3,0].set(xlabel='daytime')
ax3[3,0].set(ylabel='F[N]')
ax3[3,0].set_title('Experiment 4')
ax3[4,0].plot(x5,f[4])
ax3[4,0].set(xlabel='daytime')
ax3[4,0].set(ylabel='F[N]')
ax3[4,0].set_title('Experiment 5')
#for i in range(1,5): # Exp2-5
#ax3[i,1].plot(dt[i+1][ds_peaks[2,i+1]:ds_peaks[1,i+1]],ds[i+1][ds_peaks[2,i+1]:ds_peaks[1,i+1]])
#ax3[i,1].set(ylabel='strain3')
#ax3[i,1].set(xlabel='daytime')
ax3[0,1].plot(dt[1][ds_peaks[5,1]:ds_peaks[4,1]],ds[1][ds_peaks[5,1]:ds_peaks[4,1]]) # special for Exp1 with peaks
ax3[0,1].set(xlabel='daytime')
ax3[0,1].set(ylabel='4A')
ax3[1,1].plot(dt[2][ds_valleys[5,2]:ds_valleys[4,2]],ds[2][ds_valleys[5,2]:ds_valleys[4,2]]) #Exp2 with valleys
ax3[1,1].set(xlabel='daytime')
ax3[1,1].set(ylabel='3A')
ax3[2,1].plot(dt[3][ds_peaks[5,3]:ds_peaks[4,3]],ds[3][ds_peaks[5,3]:ds_peaks[4,3]]) #Exp3 with peaks
ax3[2,1].set(xlabel='daytime')
ax3[2,1].set(ylabel='2B')
ax3[3,1].plot(dt[4][ds_valleys[5,4]:ds_valleys[4,4]],ds[4][ds_valleys[5,4]:ds_valleys[4,4]]) # Exp4 with valley
ax3[3,1].set(xlabel='daytime')
ax3[3,1].set(ylabel='1A')
ax3[4,1].plot(dt[5][ds_peaks[5,5]:ds_peaks[4,5]],ds[5][ds_peaks[5,5]:ds_peaks[4,5]]) #Exp5 with peaks
ax3[4,1].set(xlabel='daytime')
ax3[4,1].set(ylabel='3B')
plt.tight_layout()
fig3.subplots_adjust(top=0.88) # spacer between title and plot
plt.show()
# In the graphs of Fischer data (left side) you can note a little kink in unloading as well as in loading. In experiment 5 (TLT) the kink is much more prominent.
# ATTENTION: As we verified the spacing between neighbouring valleys as well as neighbouring peaks in our bootsensing data, we can confirm the frequency of f=0.33 Hz applied by the machine (see plots below).
# ### Time delta Fischer&bootsensing
# Now we are going to identify the extrema of the Fischer force data and of our bootsensing strain data for each Experiment 1-5. As we applied the same timespan on the x-axis of both plots, we can compare the x-coordinate of the left plot with the corresponding right one to check the response time (time delay) of our bootsensing system (i.e. the reaction time of the strain gauges).
# In[66]:
# Find extrema in Fischer F for Exp 1-5 in last cycle
inv_f={} # inverse of F
valleys_f={} # valleys in Fischer F
fmin={} # f for extrema
for i in range(5): # find extrema (in this case valley)
inv_f[i]=f[i]*(-1) # inverse of f
valleys_f[i],_=find_peaks(inv_f[i],prominence=10) # find valleys
fmin[i]=f[i][valleys_f[i]] # y-coordinate for minima
# x-coordinate for minima
x1min=x1[valleys_f[0]] #Exp1
x2min=x2[valleys_f[1]] #Exp2
x3min=x3[valleys_f[2]] #Exp3
x4min=x4[valleys_f[3]] #Exp4
x5min=x5[valleys_f[4]] #Exp5
# Find extrema in bootsensing data for Exp 1-5 in last cycle
# extract time and strain for last cycle Exp1-5 (manually)
t1=dt[1][ds_peaks[5,1]:ds_peaks[4,1]] # Exp1 -> valley
t1=t1.reset_index(drop=True) # reset index
ds1=ds[1][ds_peaks[5,1]:ds_peaks[4,1]]
ds1=ds1.reset_index(drop=True)
t2=dt[2][ds_valleys[5,2]:ds_valleys[4,2]] # Exp2 -> peak
t2=t2.reset_index(drop=True)
ds2=ds[2][ds_valleys[5,2]:ds_valleys[4,2]]
ds2=ds2.reset_index(drop=True)
t3=dt[3][ds_peaks[5,3]:ds_peaks[4,3]] # Exp3 -> valley
t3=t3.reset_index(drop=True)
ds3=ds[3][ds_peaks[5,3]:ds_peaks[4,3]]
ds3=ds3.reset_index(drop=True)
t4=dt[4][ds_valleys[5,4]:ds_valleys[4,4]] # Exp4 -> peak
t4=t4.reset_index(drop=True)
ds4=ds[4][ds_valleys[5,4]:ds_valleys[4,4]]
ds4=ds4.reset_index(drop=True)
t5=dt[5][ds_peaks[5,5]:ds_peaks[4,5]] # Exp5 -> valley
t5=t5.reset_index(drop=True)
ds5=ds[5][ds_peaks[5,5]:ds_peaks[4,5]]
ds5=ds5.reset_index(drop=True)
# Find valley for Exp1,3,5
valley_ds1,_=find_peaks(ds1*(-1)) # Exp1
valley_ds3,_=find_peaks(ds3*(-1)) # Exp3
valley_ds5,_=find_peaks(ds5*(-1)) # Exp5
# Find peak for Exp2,4
peak_ds2,_=find_peaks(ds2) # Exp2
peak_ds4,_=find_peaks(ds4) # Exp4
# Apply extrema index on x-coordinate of bootsensing for Exp1-5
t1ext=t1[valley_ds1].dt.to_pydatetime() # converting in same format as xmin
t2ext=t2[peak_ds2].dt.to_pydatetime()
t3ext=t3[valley_ds3].dt.to_pydatetime()
t4ext=t4[peak_ds4].dt.to_pydatetime()
t5ext=t5[valley_ds5].dt.to_pydatetime()
#Calculating timedelta in format to_pydatetime()
deltat1=t1ext-x1min
deltat2=t2ext-x2min
deltat3=t3ext-x3min
deltat4=t4ext-x4min
deltat5=t5ext-x5min
print(deltat1,deltat2,deltat3,deltat4,deltat5)
# If we look at the time delta for Exp1-5 we see that we are in the range of deltat = 0.007678 s - 0.1669 s. For the current setup this is sufficient. Increasing the data acquisition frequency might further reduce this time delta.
# Since the machine applied the load with a frequency of f=0.33 Hz and T=1/f, we can calculate the duration of one loading cycle. Using the vector length of the Fischer force data we can plot the force over time for each single cycle.
# In[67]:
fm=0.33 # frequency in Hz (preset)
T=1/fm #calculate time period T
fd={}
for i in range(5):
fd[i]= len(f[i])
freq=fd[0] #as all fd[i] have the same length we choose f[0]
x = np.linspace(0, T, freq, endpoint=False)
#Plot
fig4, ax4 = plt.subplots(5, 1, figsize=(6, 8))
fig4.suptitle('Experiments 1-5: Fischer F over time t ', fontsize=16)
for i in range(5):
ax4[i].plot(x,f[i])
    ax4[i].set(xlabel='time [s]')
ax4[i].set(ylabel='F[N]')
ax4[i].set_title('Experiment '+str(i+1))
plt.tight_layout()
fig4.subplots_adjust(top=0.88) # spacer between title and plot
plt.show()
# In[68]:
# Plot an example experiment with peaks and valleys for thesis
fig5, ax5 = plt.subplots(1, figsize=(15, 8))
#fig5.subplots_adjust(top=2)
#fig5.suptitle('Experiments 1-5: peaks and valleys ', fontsize=16)
ax5.plot(df[df["exp"] == (3)]['time'], df[df["exp"] == (3)]['strain3'], 'tab:blue',label='strain gauge 2b')
ax5.plot(dt[(3)][peaks[(3)]],ds[(3)][peaks[(3)]],"rx",label='peak') #Plot peaks with x
ax5.plot(dt[(3)][valleys[(3)]],ds[(3)][valleys[(3)]],"ro",label='valley') #Plot valleys with o
ax5.set(ylabel='raw signal')
ax5.set(xlabel='daytime')
ax5.set_title('Cyclic loading of TLT Speedfit')
ax5.legend()
plt.tight_layout()
fig5.subplots_adjust(top=0.88) # spacer between title and plot
plt.show()
# # Machine force and strain data matching
# In[69]:
from datetime import timedelta
# Select strain 4A (stored in strain3) and machine data for Experiment 1
data_s1=pd.concat([dt[1][ds_peaks[5,1]:ds_peaks[4,1]], ds[1][ds_peaks[5,1]:ds_peaks[4,1]]],axis=1).reset_index(drop=True) # one dataframe with strain and time
# Select strain 3C (stored in strain3) and machine data for Experiment 5
data_s5C=pd.concat([dt[5][ds_peaks[5,5]:ds_peaks[4,5]],ds[5][ds_peaks[5,5]:ds_peaks[4,5]]],axis=1).reset_index(drop=True) # one dataframe with strain and time
# Convert machine time to DataFrame in ms precision
x1=pd.DataFrame(x1,columns=['time']).astype('datetime64[ms]') # Experiment 1
x5=pd.DataFrame(x5,columns=['time']).astype('datetime64[ms]') # Experiment 5
# Convert machine force data to DataFrame
f1 = pd.DataFrame(f[0], columns=['force [N]'])
# Place this file in src/ of the DIPS folder preprocessed as in
# https://github.com/amorehead/DIPS-Plus up to and including the step prune_pairs.py (but not beyond that step).
import logging
import os
import random
from pathlib import Path
import atom3.pair as pa
import click
import pandas as pd
from atom3 import database as db
from tqdm import tqdm
from project.utils.constants import DB5_TEST_PDB_CODES, ATOM_COUNT_LIMIT
@click.command()
@click.argument('output_dir', default='../DIPS/final/raw', type=click.Path())
def main(output_dir: str):
"""Partition dataset filenames."""
filter_by_atom_count = True
max_atom_count = 10000
logger = logging.getLogger(__name__)
logger.info(f'Writing filename DataFrames to their respective text files')
# Make sure the output_dir exists
if not os.path.exists(output_dir):
os.mkdir(output_dir)
print('pairs-postprocessed.txt start')
pairs_postprocessed_txt = os.path.join(output_dir, 'pairs-postprocessed.txt')
if os.path.exists(pairs_postprocessed_txt):
print('pairs-postprocessed.txt exists, skipping ...')
else:
open(pairs_postprocessed_txt, 'w').close() # Create pairs-postprocessed.txt from scratch each run
    # Record dataset filenames conditionally by atom count (if requested - otherwise, record all)
pair_filenames = [pair_filename for pair_filename in Path(output_dir).rglob('*.dill')]
for pair_filename in tqdm(pair_filenames):
struct_id = pair_filename.as_posix().split(os.sep)[-2]
if filter_by_atom_count:
postprocessed_pair: pa.Pair = pd.read_pickle(pair_filename)
if len(postprocessed_pair.df0) < max_atom_count and len(postprocessed_pair.df1) < max_atom_count:
with open(pairs_postprocessed_txt, 'a') as f:
path, filename = os.path.split(pair_filename.as_posix())
filename = os.path.join(struct_id, filename)
f.write(filename + '\n') # Pair file was copied
else:
with open(pairs_postprocessed_txt, 'a') as f:
path, filename = os.path.split(pair_filename.as_posix())
filename = os.path.join(struct_id, filename)
f.write(filename + '\n') # Pair file was copied
print('pairs-postprocessed.txt done')
# Prepare files
pairs_postprocessed_train_txt = os.path.join(output_dir, 'pairs-postprocessed-train.txt')
if not os.path.exists(pairs_postprocessed_train_txt): # Create train data list if not already existent
open(pairs_postprocessed_train_txt, 'w+').close()
pairs_postprocessed_val_txt = os.path.join(output_dir, 'pairs-postprocessed-val.txt')
if not os.path.exists(pairs_postprocessed_val_txt): # Create val data list if not already existent
open(pairs_postprocessed_val_txt, 'w+').close()
pairs_postprocessed_test_txt = os.path.join(output_dir, 'pairs-postprocessed-test.txt')
if not os.path.exists(pairs_postprocessed_test_txt): # Create test data list if not already existent
open(pairs_postprocessed_test_txt, 'w+').close()
# Write out training-validation partitions for DIPS
output_dirs = [filename
for filename in os.listdir(output_dir)
if os.path.isdir(os.path.join(output_dir, filename))]
random.shuffle(output_dirs)
train_dirs = output_dirs[:-40]
val_dirs = output_dirs[-40:-20]
test_dirs = output_dirs[-20:]
# Ascertain training and validation filename separately
    filenames_frame = pd.read_csv(pairs_postprocessed_txt, header=None)
import pandas as pd
import numpy as np
import json
import pycountry_convert as pc
from ai4netmon.Analysis.aggregate_data import data_collectors as dc
from ai4netmon.Analysis.aggregate_data import graph_methods as gm
FILES_LOCATION = 'https://raw.githubusercontent.com/sermpezis/ai4netmon/main/data/misc/'
PATH_AS_RANK = FILES_LOCATION+'ASrank.csv'
PATH_PERSONAL = FILES_LOCATION+'perso.txt'
PATH_PEERINGDB = FILES_LOCATION+'peeringdb_2_dump_2021_07_01.json'
AS_HEGEMONY_PATH = FILES_LOCATION+'AS_hegemony.csv'
ALL_ATLAS_PROBES = FILES_LOCATION+'RIPE_Atlas_probes.json'
ROUTEVIEWS_PEERS = FILES_LOCATION+'RouteViews_peers.json'
AS_RELATIONSHIPS = FILES_LOCATION+'AS_relationships_20210701.as-rel2.txt'
def cc2cont(country_code):
'''
Receives a country code ISO2 (e.g., 'US') and returns the corresponding continent name (e.g., 'North America').
Exceptions:
- if 'EU' is given as country code (it happened in data), then it is treated as the continent code
- if the country code is not found, then a None value is returned
:param country_code: (str) ISO2 country code
:return: (str) continent name of the given country(-ies)
'''
if country_code in ['EU']:
continent_code = country_code
else:
try:
continent_code = pc.country_alpha2_to_continent_code(country_code)
except KeyError:
return None
continent_name = pc.convert_continent_code_to_continent_name(continent_code)
return continent_name
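# Hedged usage sketch (illustrative, not called anywhere in this module):
#   cc2cont('US') -> 'North America'
#   cc2cont('EU') -> 'Europe' (special-cased continent code)
#   cc2cont('XX') -> None (unknown country code raises KeyError internally)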
def get_continent(country_code):
'''
Receives a series of country codes ISO2 (e.g., 'US') and returns the corresponding continent names (e.g., 'North America').
For NaN or None elements, it returns a None value
:param country_code: (pandas Series) ISO2 country codes
:return: (list of str) continent names of the given countries
'''
continent_name = []
for cc in country_code.tolist():
        if pd.isna(cc):
            continent_name.append(None)
        else:
            continent_name.append(cc2cont(cc))
    return continent_name
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 1 00:49:21 2018
@author: teo
"""
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 8 10:32:18 2018
@author: teo
"""
import pandas as pd
from plotly import tools
import numpy as np
import matplotlib.pyplot as plt
import plotly.plotly as py
import plotly.graph_objs as go
import cufflinks as cf
cf.go_offline()
from plotly.graph_objs import *
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
#init_notebook_mode(connected=True)
data=pd.read_csv("C:\\Users\\teo\Downloads\\HR_comma_sep.csv",header=0)
# ------------------------------ Pre-Processing---------------------------------------------
data_viz=pd.DataFrame(data)
# Check for null columns
data_list=[data]
for dataset in data_list:
print("+++++++++++++++++++++++++++")
print(pd.isnull(dataset).sum() >0)
print("+++++++++++++++++++++++++++")
# We don't have missing values
xx=["other","sales","accounting","hr","technical","support","product_mng","marketing"]
for dataset in data_list[:]:
#Mapping departments
dep_mapping={"sales":1,"accounting":2,"hr":3,"technical":4,"support":5,"product_mng":6,"marketing":7}
dataset['sales']=dataset['sales'].map(dep_mapping)
dataset['sales']=dataset['sales'].fillna(0)# for other deparments RandD,IT
#Mapping salary
salary_mapping={'low':1,'medium':2,'high':3}
dataset['salary']=dataset['salary'].map(salary_mapping)
#Mapping monthly average hours
dataset.loc[dataset['average_montly_hours']<=100,'average_montly_hours'] =0
dataset.loc[(dataset['average_montly_hours']>100) & (dataset['average_montly_hours'] <=150),'average_montly_hours']=1
dataset.loc[(dataset['average_montly_hours']>150) & (dataset['average_montly_hours'] <=250),'average_montly_hours']=2
dataset.loc[(dataset['average_montly_hours']>250) & (dataset['average_montly_hours']<=300),'average_montly_hours'] =3
dataset.loc[dataset['average_montly_hours']>300,'average_montly_hours'] =4
#Mapping time spend company
dataset.loc[dataset['time_spend_company']<=3,'time_spend_company'] =3
dataset.loc[(dataset['time_spend_company']>3) & (dataset['time_spend_company'] <=6),'time_spend_company']=6
dataset.loc[dataset['time_spend_company']>6 ,'time_spend_company'] =9
#Mapping last evaluation
dataset.loc[dataset['last_evaluation']<=0.25,'last_evaluation'] =0
dataset.loc[(dataset['last_evaluation']>0.25) & (dataset['last_evaluation']<=0.5),'last_evaluation'] =1
dataset.loc[(dataset['last_evaluation']>0.5) & (dataset['last_evaluation'] <=0.75),'last_evaluation']=2
dataset.loc[dataset['last_evaluation']>0.75 ,'last_evaluation'] =3
#Mapping satisfaction level
dataset.loc[dataset['satisfaction_level']<=0.25,'satisfaction_level'] =0
dataset.loc[(dataset['satisfaction_level']>0.25) & (dataset['satisfaction_level'] <=0.5),'satisfaction_level']=1
dataset.loc[(dataset['satisfaction_level']>0.5) & (dataset['satisfaction_level']<=0.75),'satisfaction_level'] =2
dataset.loc[dataset['satisfaction_level']>0.75 ,'satisfaction_level'] =3
#convert list to dataframe
features = dataset.dtypes.index
dataDF = pd.DataFrame() #creates a new dataframe that's empty
dataDF = dataDF.append(data_list, ignore_index = False)
features = list(dataDF.columns[:10])
features.pop(6)
from sklearn import preprocessing
y = dataDF["left"]
X = dataDF[features]
X=preprocessing.scale(X)
X=pd.DataFrame(X)
init_notebook_mode(connected=True)
#cf.set_config_file(offline=False, world_readable=True, theme='ggplot')
df_plot_helper_salary=data_viz.groupby("salary")["left"].sum()
df_plot_helper_department=data_viz.groupby("sales")["left"].sum()
df_plot_helper_left=data_viz.groupby("left")["left"].sum()
salary_size=data_viz.groupby("salary").size()
sales_size=data_viz.groupby("sales").size()
df_plot_helper_department2=sales_size-df_plot_helper_department
df_plot_helper_salary2=salary_size-df_plot_helper_salary
#rename the sales column
data['sales']=data.rename(columns={'sales':'department'},inplace=True)
data=data.drop(['sales'],1)
#visualziation of the dependent variable left
a=data_viz.query('left == "0"')
#df_plot_helper_department2=a.groupby("sales")["sales"].sum().apply(lambda x: '%.3f' % x)
import plotly
plotly.offline.init_notebook_mode()
###############Department Barchart##############
x1=[0,1,2,3,4,5,6,7,]
y1=pd.DataFrame({'email':df_plot_helper_salary.index, 'list':df_plot_helper_salary.values})
y2=pd.DataFrame({'email':df_plot_helper_salary2.index, 'list':df_plot_helper_salary2.values})
y11=pd.DataFrame({'email':df_plot_helper_department.index, 'list':df_plot_helper_department.values})
y22 = pd.DataFrame({'email': df_plot_helper_department2.index, 'list': df_plot_helper_department2.values})
'''
Copyright 2022 Airbus SAS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from sos_trades_core.tools.post_processing.post_processing_factory import PostProcessingFactory
from sos_trades_core.study_manager.study_manager import StudyManager
from os.path import join, dirname
from numpy import asarray, arange, array
import pandas as pd
import numpy as np
from sos_trades_core.execution_engine.func_manager.func_manager import FunctionManager
from sos_trades_core.execution_engine.func_manager.func_manager_disc import FunctionManagerDisc
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
def update_dspace_with(dspace_dict, name, value, lower, upper):
''' type(value) has to be ndarray
'''
if not isinstance(lower, (list, np.ndarray)):
lower = [lower] * len(value)
if not isinstance(upper, (list, np.ndarray)):
upper = [upper] * len(value)
dspace_dict['variable'].append(name)
dspace_dict['value'].append(value.tolist())
dspace_dict['lower_bnd'].append(lower)
dspace_dict['upper_bnd'].append(upper)
dspace_dict['dspace_size'] += len(value)
def update_dspace_dict_with(dspace_dict, name, value, lower, upper, activated_elem=None, enable_variable=True):
if not isinstance(lower, (list, np.ndarray)):
lower = [lower] * len(value)
if not isinstance(upper, (list, np.ndarray)):
upper = [upper] * len(value)
if activated_elem is None:
activated_elem = [True] * len(value)
dspace_dict[name] = {'value': value,
'lower_bnd': lower, 'upper_bnd': upper, 'enable_variable': enable_variable, 'activated_elem': activated_elem}
dspace_dict['dspace_size'] += len(value)
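# Hedged usage sketch for the two helpers above (variable names are illustrative):
#   dspace = {'variable': [], 'value': [], 'lower_bnd': [], 'upper_bnd': [], 'dspace_size': 0}
#   update_dspace_with(dspace, 'share_invest_poles', np.array([27.0, 27.0]), 10.0, 50.0)
#   # -> appends the variable with per-element bounds and increases dspace_size by 2
#   dspace_dict = {'dspace_size': 0}
#   update_dspace_dict_with(dspace_dict, 'share_invest_poles', np.array([27.0, 27.0]), 10.0, 50.0)
#   # -> stores a per-variable entry ('value', 'lower_bnd', 'upper_bnd', 'enable_variable',
#   #    'activated_elem') instead of parallel lists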
class Study(StudyManager):
def __init__(self, year_start=2000, year_end=2020, time_step=1, name='', execution_engine=None):
super().__init__(__file__, execution_engine=execution_engine)
self.study_name = 'usecase'
self.macro_name = '.Macroeconomics'
self.year_start = year_start
self.year_end = year_end
self.time_step = time_step
self.nb_poles = 8
def setup_usecase(self):
setup_data_list = []
years = np.arange(self.year_start, self.year_end + 1, 1)
self.nb_per = round(self.year_end - self.year_start + 1)
# data dir
data_dir = join(
dirname(dirname(dirname(dirname(dirname(__file__))))), 'tests', 'data')
if self.year_start == 2000 and self.year_end == 2020:
data_dir = join(
dirname(dirname(dirname(dirname(dirname(__file__))))), 'tests', 'data/sectorization_fitting')
#Invest
hist_invest = pd.read_csv(join(data_dir, 'hist_invest_sectors.csv'))
agri_invest = pd.DataFrame({'years': hist_invest['years'], 'investment': hist_invest['Agriculture']})
services_invest = pd.DataFrame({'years': hist_invest['years'], 'investment': hist_invest['Services']})
indus_invest = pd.DataFrame({'years': hist_invest['years'], 'investment': hist_invest['Industry']})
#Energy
hist_energy = pd.read_csv(join(data_dir, 'hist_energy_sect.csv'))
agri_energy = pd.DataFrame({'years': hist_energy['years'], 'Total production': hist_energy['Agriculture']})
services_energy = pd.DataFrame({'years': hist_energy['years'], 'Total production': hist_energy['Services']})
indus_energy = pd.DataFrame({'years': hist_energy['years'], 'Total production': hist_energy['Industry']})
#Workforce
hist_workforce = pd.read_csv(join(data_dir, 'hist_workforce_sect.csv'))
agri_workforce = pd.DataFrame({'years': hist_workforce['years'], 'workforce': hist_workforce['Agriculture']})
services_workforce = pd.DataFrame({'years': hist_workforce['years'], 'workforce': hist_workforce['Services']})
indus_workforce = pd.DataFrame({'years': hist_workforce['years'], 'workforce': hist_workforce['Industry']})
else:
invest_init = 31.489
invest_serie = np.zeros(self.nb_per)
invest_serie[0] = invest_init
for year in np.arange(1, self.nb_per):
invest_serie[year] = invest_serie[year - 1] * 1.02
agri_invest = pd.DataFrame({'years': years, 'investment': invest_serie * 0.0187})
indus_invest = pd.DataFrame({'years': years, 'investment': invest_serie * 0.18737})
services_invest = pd.DataFrame({'years': years, 'investment': invest_serie * 0.7939})
#Energy
brut_net = 1/1.45
energy_outlook = pd.DataFrame({
'year': [2000, 2005, 2010, 2017, 2018, 2025, 2030, 2035, 2040, 2050, 2060, 2100],
'energy': [118.112,134.122 ,149.483879, 162.7848774, 166.4685636, 180.7072889, 189.6932084, 197.8418842, 206.1201182, 220.000, 250.0, 300.0]})
f2 = interp1d(energy_outlook['year'], energy_outlook['energy'])
#Find values for 2020, 2050 and concat dfs
energy_supply = f2(np.arange(self.year_start, self.year_end+1))
energy_supply_values = energy_supply * brut_net
indus_energy = pd.DataFrame({'years': years, 'Total production': energy_supply_values * 0.2894})
agri_energy = pd.DataFrame({'years': years, 'Total production': energy_supply_values * 0.2136})
services_energy = pd.DataFrame({'years': years, 'Total production': energy_supply_values * 0.37})
total_workforce_df = pd.read_csv(join(data_dir, 'workingage_population_df.csv'))
#multiply ageworking pop by employment rate
workforce = total_workforce_df['population_1570']* 0.659
workforce = workforce[:self.nb_per]
agri_workforce = pd.DataFrame({'years': years, 'workforce': workforce * 0.274})
services_workforce = pd.DataFrame({'years': years, 'workforce': workforce * 0.509})
indus_workforce = pd.DataFrame({'years': years, 'workforce': workforce * 0.217})
#Damage
damage_df = pd.DataFrame({'years': years, 'damages': np.zeros(self.nb_per), 'damage_frac_output': np.zeros(self.nb_per),
'base_carbon_price': np.zeros(self.nb_per)})
#Share invest
share_invest = np.asarray([27.0] * self.nb_per)
        share_invest = pd.DataFrame({'years': years, 'share_investment': share_invest})
#===============================================================================#
# PyGrouper - <NAME>
from __future__ import print_function
import re, os, sys, time
import itertools
import json
import logging
from time import sleep
from collections import defaultdict
from functools import partial
from math import ceil
from warnings import warn
import six
if six.PY3:
from configparser import ConfigParser
elif six.PY2:
from ConfigParser import ConfigParser
from itertools import repeat
import traceback
import multiprocessing
from copy import deepcopy as copy
import numpy as np
import pandas as pd
from pandas.api.types import CategoricalDtype
from RefProtDB.utils import fasta_dict_from_file
from . import _version
from .subfuncts import *
# from ._orig_code import timed
pd.set_option(
"display.width", 170,
"display.max_columns", 500,
)
__author__ = '<NAME>'
__copyright__ = _version.__copyright__
__credits__ = ['<NAME>', '<NAME>']
__license__ = 'BSD 3-Clause'
__version__ = _version.__version__
__maintainer__ = '<NAME>'
__email__ = '<EMAIL>'
program_title = 'gpGrouper v{}'.format(__version__)
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
logfilename = program_title.replace(' ', '_') + '.log'
logging.basicConfig(filename=logfilename, level=logging.DEBUG)
logging.info('{}: Initiating {}'.format(datetime.now(), program_title))
SEP = ';'
labelflag = {'none': 0, # hard coded number IDs for labels
'TMT_126': 1260,
'TMT_127_C': 1270,
'TMT_127_N': 1271,
'TMT_128_C': 1280,
'TMT_128_N': 1281,
'TMT_129_C': 1290,
'TMT_129_N': 1291,
'TMT_130_C': 1300,
'TMT_130_N': 1301,
'TMT_131': 1310,
             'iTRAQ_113': 113,
             'iTRAQ_114': 114,
'iTRAQ_115': 115,
'iTRAQ_116': 116,
'iTRAQ_117': 117,
'iTRAQ_118': 118,
'iTRAQ_119': 119,
'iTRAQ_121': 121,
}
flaglabel = {v:k for k,v in labelflag.items()}
E2G_COLS = ['EXPRecNo', 'EXPRunNo', 'EXPSearchNo', 'EXPLabelFLAG', 'AddedBy', 'CreationTS',
'ModificationTS', 'GeneID', 'GeneSymbol', 'Description', 'TaxonID', 'HIDs', 'PeptidePrint',
'GPGroup', 'GPGroups_All', 'ProteinGIs', 'ProteinRefs', 'ProteinGI_GIDGroups',
'ProteinGI_GIDGroupCount', 'ProteinRef_GIDGroups', 'ProteinRef_GIDGroupCount', 'IDSet', 'IDGroup',
'IDGroup_u2g', 'SRA', 'Coverage', 'Coverage_u2g', 'PSMs', 'PSMs_u2g', 'PeptideCount',
'PeptideCount_u2g', 'PeptideCount_S', 'PeptideCount_S_u2g', 'AreaSum_u2g_0', 'AreaSum_u2g_all',
'AreaSum_max', 'AreaSum_dstrAdj', 'GeneCapacity', 'iBAQ_dstrAdj']
DATA_COLS = ['EXPRecNo', 'EXPRunNo', 'EXPSearchNo',
'Sequence', 'PSMAmbiguity', 'Modifications',
'ActivationType', 'DeltaScore', 'DeltaCn',
'Rank', 'SearchEngineRank', 'PrecursorArea',
'q_value', 'PEP', 'IonScore',
'MissedCleavages', 'IsolationInterference', 'IonInjectTime',
'Charge', 'mzDa', 'MHDa',
'DeltaMassDa', 'DeltaMassPPM', 'RTmin',
'FirstScan', 'LastScan', 'MSOrder', 'MatchedIons',
'SpectrumFile', 'AddedBy',
'oriFLAG',
'CreationTS', 'ModificationTS', 'GeneID',
'GeneIDs_All', 'GeneIDCount_All',
'ProteinGIs',
'ProteinGIs_All', 'ProteinGICount_All',
'ProteinRefs',
'ProteinRefs_All', 'ProteinRefCount_All',
'HIDs', 'HIDCount_All',
'TaxonID', 'TaxonIDs_All', 'TaxonIDCount_All',
'PSM_IDG', 'SequenceModi',
'SequenceModiCount', 'LabelFLAG',
'PeptRank', 'AUC_UseFLAG', 'PSM_UseFLAG',
'Peak_UseFLAG', 'SequenceArea', 'PrecursorArea_split',
# 'RazorArea',
'PrecursorArea_dstrAdj']
_EXTRA_COLS = ['LastScan', 'MSOrder', 'MatchedIons'] # these columns are not required to be in the output data columns
try:
from PIL import Image, ImageFont, ImageDraw
imagetitle = True
except ImportError:
imagetitle = False
if six.PY2:
class DirEntry:
def __init__(self, f):
self.f = f
def is_file(self):
return os.path.isfile(self.f)
def is_dir(self):
return os.path.isdir(self.f)
@property
def name(self):
return self.f
def scandir(path='.'):
files = os.listdir('.')
for f in files:
yield DirEntry(f)
os.scandir = scandir
def _apply_df(input_args):
df, func, i, func_args, kwargs = input_args
return i, df.apply(func, args=(func_args), **kwargs)
def apply_by_multiprocessing(df, func, workers=1, func_args=None, **kwargs):
"""
Spawns multiple processes if has os.fork and workers > 1
"""
if func_args is None:
func_args = tuple()
if workers == 1 or not hasattr(os, 'fork'):
result = _apply_df((df, func, 0, func_args, kwargs,))
return result[1]
workers = min(workers, len(df)) # edge case where df has less rows than workers
workers = max(workers, 1) # has to be at least 1
# pool = multiprocessing.Pool(processes=workers)
with multiprocessing.Pool(processes=workers) as pool:
result = pool.map(_apply_df, [(d, func, i, func_args, kwargs,)
for i, d in enumerate(np.array_split(df, workers))]
)
# pool.close()
result = sorted(result, key=lambda x: x[0])
return pd.concat([x[1] for x in result])
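# Hedged usage sketch (illustrative only; not executed by this module). With workers == 1, or on
# platforms without os.fork, the call reduces to a plain df.apply; otherwise the frame is split with
# np.array_split, the pieces are applied in a multiprocessing.Pool, and the results are re-concatenated
# in their original order. The applied function must be picklable (a module-level function, not a lambda):
#
#   def scale_row(row, factor):
#       return row['a'] * factor
#
#   df = pd.DataFrame({'a': range(10)})
#   out = apply_by_multiprocessing(df, scale_row, workers=4, func_args=(2,), axis=1)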
def quick_save(df,name='df_snapshot.p', path=None, q=False):
import pickle
#import RefSeqInfo
if path:
name = path+name
#df.to_csv('test_matched.tab', index=False, sep='\t')
pickle.dump(df, open(name, 'wb'))
print('Pickling...')
if q:
print('Exiting prematurely')
sys.exit(0)
def _get_rawfile_info(path, spectraf):
if path is None:
path = '.'
if not os.path.isdir(path):
return ('not found, check rawfile path', 'not found')
for f in os.listdir(path):
if f == spectraf:
rawfile = os.path.abspath(os.path.join(path,f))
break
else:
return ('not found', 'not found')
fstats = os.stat(rawfile)
mod_date = datetime.fromtimestamp(fstats.st_mtime).strftime("%m/%d/%Y %H:%M:%S")
size = byte_formatter(fstats.st_size)
return (size, mod_date)
def _spectra_summary(spectraf, data):
""" Calculates metadata per spectra file.
The return order is as follows:
-minimum RT_min
-maximum RT_min
-min IonScore
-max IonScore
-min q_value
-max q_value
-min PEP
-max PEP
    -min Area (precursor, excluding zeros)
-max Area
-PSM Count
-median DeltaMassPPM
"""
data = data[data.SpectrumFile==spectraf]
RT_min = data.RTmin.min()
RT_max = data.RTmin.max()
IonScore_min = data.IonScore.min()
IonScore_max = data.IonScore.max()
q_min = data.q_value.min()
q_max = data.q_value.max()
PEP_min = data.PEP.min()
PEP_max = data.PEP.max()
area_min = data[data.PrecursorArea!=0].PrecursorArea.min()
area_max = data.PrecursorArea.max()
count = len(data[data.PSM_UseFLAG==1])
dmass_median = data.DeltaMassPPM.median()
return(RT_min, RT_max, IonScore_min, IonScore_max, q_min, q_max,
PEP_min, PEP_max, area_min, area_max, count, dmass_median)
def spectra_summary(usrdata):
"""Summaries the spectral files included in an analysis.
Args:
usrdata: a UserData instance with the data loaded
Returns:
A pandas DataFrame with relevant columns, ready for export
if the raw files cannot be found at usrdata.rawfiledir,
then 'not found' is returned for those columns
"""
msfdata = pd.DataFrame()
# msfdata['RawFileName'] = list(set(usrdata.df.SpectrumFile.tolist()))
msfdata['RawFileName'] = sorted(usrdata.df.SpectrumFile.unique())
msfdata['EXPRecNo'] = usrdata.recno
msfdata['EXPRunNo'] = usrdata.runno
msfdata['EXPSearchNo'] = usrdata.searchno
msfdata['AddedBy'] = usrdata.added_by
msfdata['CreationTS'] = datetime.now().strftime("%m/%d/%Y %H:%M:%S")
msfdata['ModificationTS'] = datetime.now().strftime("%m/%d/%Y %H:%M:%S")
summary_info = msfdata.apply(lambda x:
_spectra_summary(x['RawFileName'],
usrdata.df),
axis=1)
(msfdata['RTmin_min'], msfdata['RTmin_max'], msfdata['IonScore_min'],
msfdata['IonScore_max'], msfdata['qValue_min'], msfdata['qValue_max'],
msfdata['PEP_min'], msfdata['PEP_max'], msfdata['Area_min'],
msfdata['Area_max'], msfdata['PSMCount'],
msfdata['DeltaMassPPM_med']) = zip(*summary_info)
rawfile_info = msfdata.apply(lambda x:
_get_rawfile_info(usrdata.rawfiledir,
x['RawFileName']),
axis=1)
msfdata['RawFileSize'], msfdata['RawFileTS'] = zip(*rawfile_info)
return msfdata
def get_gid_ignore_list(inputfile):
"""Input a file with a list of geneids to ignore when normalizing across taxa
Each line should have 1 geneid on it.
Use '#' at the start of the line for comments
Output a list of geneids to ignore.
"""
# Don't convert GIDs to ints,
# GIDs are not ints for the input data
return [x.strip() for x in open(inputfile, 'r') if
not x.strip().startswith('#')]
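# Hedged example of an accepted ignore-list file (file name and geneids are illustrative):
#
#   # contaminant geneids excluded from taxon normalization
#   12345
#   67890
#
# get_gid_ignore_list('gid_ignore.txt') -> ['12345', '67890']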
def _format_peptideinfo(row):
if len(row) == 0:
return ('', 0, '', 0, '', 0, '', 0, '', 0, ())
result = (
# ','.join(row['GeneID'].dropna().unique()),
SEP.join(str(x) for x in set(row['geneid'])),
row['geneid'].replace('', np.nan).nunique(dropna=True),
# ','.join(row['TaxonID'].dropna().unique()),
SEP.join(str(x) for x in set(row['taxon'])),
row['taxon'].replace('', np.nan).nunique(dropna=True),
# ','.join(row['ProteinGI'].dropna().unique()),
SEP.join(str(x) for x in set(row['gi'])),
row['gi'].replace('', np.nan).nunique(dropna=True),
SEP.join(str(x) for x in set(row['ref'])),
row['ref'].replace('', np.nan).nunique(dropna=True),
# ','.join(row['HomologeneID'].dropna().unique()),
SEP.join(str(x) for x in set(row['homologene'])),
row['homologene'].replace('', np.nan).nunique(dropna=True),
SEP.join(str(x) for x in row['capacity']),
# tuple(row['capacity']),
# row['capacity'].mean(),
)
return result
def _extract_peptideinfo(row, database):
return _format_peptideinfo(database.loc[row])
def combine_coverage(start_end):
start_end = sorted(copy(start_end))
# for ix, entry in enumerate(start_end[:-1]):
ix = 0
while ix < len(start_end):
try:
entry = start_end[ix]
next_entry = start_end[ix+1]
except IndexError: # done
break
if entry[1] >= next_entry[0] and entry[1] <= next_entry[1]:
start_end[ix][1] = next_entry[1]
start_end.pop(ix+1)
else:
ix += 1
return start_end
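# Hedged sketch of the interval merging above (illustrative values):
#   combine_coverage([[0, 5], [3, 9], [12, 15]]) -> [[0, 9], [12, 15]]
# Overlapping [start, end] spans are merged so covered residues are not counted twice.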
def _calc_coverage(seqs, pepts):
pepts = sorted(pepts, key=lambda x: min(y+len(x) for y in [s.find(x) for s in seqs]))
coverages = list()
for s in seqs:
start_end = list()
coverage = 0
for pept in pepts:
start = 0
mark = s.find(pept.upper(), start)
while mark != -1:
start_id, end_id = mark, mark + len(pept)
start += end_id
for start_i, end_i in start_end:
if start_id < end_i and end_id > end_i:
start_id = end_i
break
elif start_id < start_i and end_id > start_i and end_id < end_i:
end_id = start_i
break
elif start_id >= start_i and end_id <= end_i:
start_id = end_id = 0
break
else:
continue
if start_id != end_id:
start_end.append( [ start_id, end_id ] )
# start_end = combine_coverage(start_end) # only need to do this if we updated this list
# coverage += end_id-start_id
mark = s.find(pept.upper(), start)
start_end = combine_coverage(start_end)
coverage = np.sum([ x[1] - x[0] for x in start_end ])
coverages.append( coverage/len(s) )
# sum(y-x)
    if coverages:
        return np.mean(coverages)
    else:
        warn('A gene with no computable coverage was encountered; returning coverage of 0')
        return 0
def calc_coverage_axis(row, fa, psms):
"""
Calculates total and u2g coverage for each GeneID (row) with respect to
reference fasta (fa) and peptide evidence (psms)
"""
if row['GeneID'] == '-1': # reserved for no GeneID match
return 0, 0
seqs = fa[fa.geneid == row['GeneID']]['sequence'].tolist()
if len(seqs) == 0: # mismatch
warn('When calculating coverage, GeneID {} not found in fasta file'.format(row['GeneID']))
return 0, 0
pepts = row['PeptidePrint'].split('_')
u2g_pepts = psms[ (psms.GeneID == row['GeneID']) & (psms.GeneIDCount_All == 1) ].Sequence.unique()
return _calc_coverage(seqs, pepts), _calc_coverage(seqs, u2g_pepts) if len(u2g_pepts) > 0 else 0
def calc_coverage(df, fa, psms):
res = df.pipe(apply_by_multiprocessing,
calc_coverage_axis,
workers=WORKERS,
func_args=(fa, psms),
axis=1
)
# df['Coverage'], df['Coverage_u2g'] = list(zip(res))
df['Coverage'], df['Coverage_u2g'] = list(zip(*res))
return df
def extract_peptideinfo(usrdata, database):
filter_int = partial(filter, lambda x : x.isdigit())
to_int = partial(map, int)
ixs = (usrdata.df.metadatainfo.str.strip('|')
.str.split('|')
.apply(filter_int)
.apply(to_int)
.apply(list)
# .apply(pd.Series)
# .stack()
# .to_frame()
)
# info = ixs.apply(lambda x : _format_peptideinfo(database.loc[x])).apply(pd.Series)
info = ixs.pipe(apply_by_multiprocessing,
_extract_peptideinfo,
func_args=(database,),
workers=WORKERS,
).apply(pd.Series)
info.columns = ['GeneIDs_All', 'GeneIDCount_All', 'TaxonIDs_All', 'TaxonIDCount_All', 'ProteinGIs_All',
'ProteinGICount_All', 'ProteinRefs_All', 'ProteinRefCount_All', 'HIDs', 'HIDCount_All',
'GeneCapacities']
for col in ('TaxonIDs_All', 'ProteinGIs_All', 'ProteinGICount_All',
'ProteinRefs_All', 'ProteinRefCount_All', 'HIDs', 'HIDCount_All'):
info[col] = info[col].astype('category')
info['TaxonIDCount_All'] = info['TaxonIDCount_All'].astype(np.int16)
usrdata.df = usrdata.df.join(info)
# (usrdata.df['GeneIDs_All'],
# usrdata.df['GeneIDCount_All'],
# usrdata.df['TaxonIDs_All'],
# usrdata.df['TaxonIDCount_All'],
# usrdata.df['ProteinGIs_All'],
# usrdata.df['ProteinGICount_All'],
# usrdata.df['ProteinRefs_All'],
# usrdata.df['ProteinRefCount_All'],
# usrdata.df['HIDs'],
# usrdata.df['HIDCount_All'],
# usrdata.df['GeneCapacities']) = zip(*info)
# usrdata.df['TaxonIDs_All'] = usrdata.df['TaxonIDs_All'].dropna().astype(str)
# usrdata.df['HIDs'] = usrdata.df['HIDs'].fillna('')
return 0
def gene_mapper(df, other_col=None):
if other_col is None or other_col not in df.columns:
raise ValueError("Must specify other column")
groupdf = (df[['geneid', other_col]]
.drop_duplicates()
.groupby('geneid')
)
# d = {k: SEP.join(filter(None, str(v))) for k, v in groupdf[other_col]}
d = {k: SEP.join(filter(None, map(str, v))) for k, v in groupdf[other_col]}
return d
def gene_taxon_mapper(df):
"""Returns a dictionary with mapping:
gene -> taxon
Input is the metadata extracted previously"""
return gene_mapper(df, 'taxon')
def gene_symbol_mapper(df):
"""Returns a dictionary with mapping:
gene -> taxon
Input is the metadata extracted previously"""
return gene_mapper(df, 'symbol')
def gene_desc_mapper(df):
"""Returns a dictionary with mapping:
gene -> taxon
Input is the metadata extracted previously"""
return gene_mapper(df, 'description')
def gene_hid_mapper(df):
"""Returns a dictionary with mapping:
gene -> taxon
Input is the metadata extracted previously"""
return gene_mapper(df, 'homologene')
def gene_protgi_mapper(df):
"""Returns a dictionary with mapping:
gene -> taxon
Input is the metadata extracted previously"""
return gene_mapper(df, 'gi')
def gene_protref_mapper(df):
"""Returns a dictionary with mapping:
gene -> taxon
Input is the metadata extracted previously"""
return gene_mapper(df, 'ref')
def assign_IDG(df, filtervalues=None):
filtervalues = filtervalues or dict()
ion_score_bins = filtervalues.get('ion_score_bins', (10, 20, 30))
df['PSM_IDG'] = pd.cut(df['IonScore'],
# bins=(0, *ion_score_bins, np.inf),
bins=(0,) + tuple(ion_score_bins) + (np.inf,),
labels=[7, 5, 3, 1], include_lowest=True,
right=False).astype('int')
df.loc[ df['q_value'] > .01, 'PSM_IDG' ] += 1
df.loc[ (df['IonScore'].isna() | df['q_value'].isna()), 'PSM_IDG'] = 9
return df
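# Hedged worked example of the binning above (with the default ion_score_bins of (10, 20, 30)):
# a PSM with IonScore 25 and q_value 0.005 falls in the [20, 30) bin -> PSM_IDG 3; the same score
# with q_value 0.02 (> .01) is bumped to PSM_IDG 4; a PSM missing IonScore or q_value gets PSM_IDG 9.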
def make_seqlower(usrdata, col='Sequence'):
"""Make a new column called sequence_lower from a DataFrame"""
usrdata['sequence_lower'] = usrdata[col].str.lower()
return
def peptidome_matcher(usrdata, ref_dict):
if not ref_dict:
return usrdata
ref_dict_filtered = ref_dict
pmap = partial(map, str)
result = (usrdata.Sequence.str.upper().map(ref_dict)
.fillna('')
.map(pmap)
.map('|'.join)
.add('|')
)
usrdata['metadatainfo'] += result
return usrdata
def redundant_peaks(usrdata):
""" Remove redundant, often ambiguous peaks by keeping the peak
with the highest ion score"""
peaks = usrdata.sort_values(by='IonScore', ascending=False).\
drop_duplicates(subset=['SpectrumFile','SequenceModi', 'Charge', 'PrecursorArea'])
peaks['Peak_UseFLAG'] = 1
# peaks['Peak_UseFLAG'] = True
usrdata = usrdata.join(peaks['Peak_UseFLAG'])
usrdata['Peak_UseFLAG'] = usrdata.Peak_UseFLAG.fillna(0).astype(np.int8)
# usrdata['Peak_UseFLAG'] = usrdata.Peak_UseFLAG.fillna(False)
print('Redundant peak areas removed : ', len(usrdata)-len(peaks))
return usrdata
def sum_area(df):
"""Sum the area of similar peaks
New column SequenceArea is created
"""
df['Sequence_set'] = df['Sequence'].apply(lambda x: tuple(set(list(x))))
summed_area = (df.query('Peak_UseFLAG==1')
# .filter(items=['SequenceModi', 'Charge', 'PrecursorArea'])
.groupby(['SequenceModi', 'Charge'])
.agg({'PrecursorArea_split': 'sum'})
.reset_index()
.rename(columns={'PrecursorArea_split': 'SequenceArea'})
)
df = df.merge(summed_area, how='left', on=['SequenceModi', 'Charge'])
return df
def auc_reflagger(df):
"""Remove duplicate sequence areas
"""
#usrdata['Sequence_set'] = usrdata['Sequence'].apply(lambda x: tuple(set(list(x))))
no_dups = (df.sort_values(by=['SequenceModi', 'Charge', 'SequenceArea',
'PSM_IDG', 'IonScore', 'PEP', 'q_value'],
ascending=[1,1,0,1,0,1,1])
.drop_duplicates(subset=['SequenceArea', 'Charge', 'SequenceModi',])
.assign(AUC_reflagger = True)
)
df = (df.join(no_dups[['AUC_reflagger']])
.assign(AUC_reflagger = lambda x: (x['AUC_reflagger']
.fillna(0)
.astype(np.int8)))
)
return df
def export_metadata(program_title='version',usrdata=None, matched_psms=0, unmatched_psms=0,
usrfile='file', taxon_totals=dict(), outname=None, outpath='.', **kwargs):
"""Update iSPEC database with some metadata information
"""
print('{} | Exporting metadata'.format(time.ctime()))
#print('Number of matched psms : ', matched_psms)
d = dict(
version=program_title,
searchdb=usrdata.searchdb,
filterstamp=usrdata.filterstamp,
matched_psms=matched_psms,
unmatched_psms=unmatched_psms,
inputname=usrdata.datafile,
hu=taxon_totals.get('9606', 0),
mou=taxon_totals.get('10090', 0),
gg=taxon_totals.get('9031', 0),
recno=usrdata.recno,
runno=usrdata.runno,
searchno=usrdata.searchno
)
with open(os.path.join(outpath, outname), 'w') as f:
json.dump(d, f)
def split_on_geneid(df):
"""Duplicate psms based on geneids. Areas of each psm is recalculated based on
unique peptides unique for its particular geneid later.
"""
oriflag = lambda x: 1 if x[-1] == 0 else 0
glstsplitter = (df['GeneIDs_All'].str.split(SEP)
.apply(pd.Series, 1).stack()
.to_frame(name='GeneID')
.assign(oriFLAG= lambda x: x.index.map(oriflag))
)
glstsplitter.index = glstsplitter.index.droplevel(-1) # get rid of
# multi-index
df = (df.join(glstsplitter)
.reset_index())
df['GeneID'] = df.GeneID.fillna('-1')
df.loc[df.GeneID == '', 'GeneID'] = '-1'
df['GeneID'] = df.GeneID.fillna('-1')
# df['GeneID'] = df.GeneID.astype(int)
df['GeneID'] = df.GeneID.astype(str)
return df
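# Hedged sketch of the split above: a PSM whose GeneIDs_All is '1234;5678' becomes two rows,
# GeneID '1234' (oriFLAG 1, the first listed assignment) and GeneID '5678' (oriFLAG 0);
# PSMs with no gene assignment fall back to the reserved GeneID '-1'.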
def rank_peptides(df, area_col, ranks_only=False):
"""Rank peptides here
area_col is sequence area_calculator
ranks_only returns just the ranks column. This does not reset the original index
"""
df = df.sort_values(by=['GeneID', area_col,
'SequenceModi',
'Charge', 'PSM_IDG', 'IonScore', 'PEP',
'q_value'],
ascending=[1, 0, 0, 1, 1, 0, 1, 1])
if not ranks_only: # don't reset index for just the ranks
df.reset_index(inplace=True) # drop=True ?
df.Modifications.fillna('', inplace=True) # must do this to compare nans
df[area_col].fillna(0, inplace=True) # must do this to compare
#nans
ranks = (df[ (df.AUC_UseFLAG == 1) &
(df.PSM_UseFLAG == 1) &
(df.Peak_UseFLAG == 1) ]
.groupby(['GeneID', 'LabelFLAG'])
.cumcount() + 1) # add 1 to start the peptide rank at 1, not 0
ranks.name = 'PeptRank'
if ranks_only:
return ranks
df = df.join(ranks)
return df
def flag_AUC_PSM(df, fv, contaminant_label='__CONTAMINANT__', phospho=False):
if fv['pep'] =='all' : fv['pep'] = float('inf')
if fv['idg'] =='all' : fv['idg'] = float('inf')
df['AUC_UseFLAG'] = 1
df['PSM_UseFLAG'] = 1
df.loc[(df['Charge'] < fv['zmin']) | (df['Charge'] > fv['zmax']),
['AUC_UseFLAG', 'PSM_UseFLAG']] = 0
df.loc[df['SequenceModiCount'] > fv['modi'],
['AUC_UseFLAG', 'PSM_UseFLAG']] = 0
df.loc[(df['IonScore'].isnull() | df['q_value'].isnull()),
['AUC_UseFLAG', 'PSM_UseFLAG']] = 1, 0
df.loc[df['IonScore'] < fv['ion_score'],
['AUC_UseFLAG', 'PSM_UseFLAG']] = 0
df.loc[df['q_value'] > fv['qvalue'],
['AUC_UseFLAG', 'PSM_UseFLAG']] = 0
df.loc[df['PEP'] > fv['pep'],
['AUC_UseFLAG', 'PSM_UseFLAG']] = 0
df.loc[df['PSM_IDG'] > fv['idg'],
['AUC_UseFLAG', 'PSM_UseFLAG']] = 0
if df['PSMAmbiguity'].dtype == str:
df.loc[(df['Peak_UseFLAG'] == 0) & (df['PSMAmbiguity'].str.lower()=='unambiguous'),
['AUC_UseFLAG', 'PSM_UseFLAG']] = 1
df.loc[(df['Peak_UseFLAG'] == 0) & (df['PSMAmbiguity'].str.lower()!='unambiguous'),
['AUC_UseFLAG', 'PSM_UseFLAG']] = 0
elif any(df['PSMAmbiguity'].dtype == x for x in (int, float)):
df.loc[(df['Peak_UseFLAG'] == 0) & (df['PSMAmbiguity'] == 0),
['AUC_UseFLAG', 'PSM_UseFLAG']] = 1
df.loc[(df['Peak_UseFLAG'] == 0) & (df['PSMAmbiguity'] != 0),
['AUC_UseFLAG', 'PSM_UseFLAG']] = 0
df.loc[ df['AUC_reflagger'] == 0, 'AUC_UseFLAG'] = 0
df.loc[ df['GeneIDs_All'].fillna('').str.contains(contaminant_label), ['AUC_UseFLAG', 'PSM_UseFLAG'] ] = 0, 0
if phospho:
df.loc[ ~df['SequenceModi'].str.contains('pho', case=False), ['AUC_UseFLAG', 'PSM_UseFLAG'] ] = 0, 0
return df
def gene_taxon_map(usrdata, gene_taxon_dict):
"""make 'gene_taxon_map' column per row which displays taxon for given gene"""
usrdata['TaxonID'] = usrdata['GeneID'].map(gene_taxon_dict)
return
def get_all_taxons(taxonidlist):
"""Return a set of all taxonids from
usrdata.TaxonIDList"""
taxon_ids = set(SEP.join(x for x in taxonidlist
if x.strip()).split(SEP))
return taxon_ids
def multi_taxon_splitter(taxon_ids, usrdata, gid_ignore_list, area_col):
"""Plugin for multiple taxons
Returns a dictionary with the totals for each detected taxon"""
taxon_totals = dict()
for taxon in taxon_ids:
#all_others = [x for x in taxon_ids if x != taxon]
uniq_taxon = usrdata[
#(usrdata._data_tTaxonIDList.str.contains(taxon)) &
#(~usrdata._data_tTaxonIDList.str.contains('|'.join(all_others)))&
(usrdata['AUC_UseFLAG'] == 1) &
(usrdata['TaxonID'] == str(taxon)) &
(usrdata['TaxonIDCount_All'] == 1) &
(~usrdata['GeneID'].isin(gid_ignore_list))
]
taxon_totals[taxon] = (uniq_taxon[area_col] / uniq_taxon['GeneIDCount_All']).sum()
tot_unique = sum(taxon_totals.values()) #sum of all unique
# now compute ratio:
for taxon in taxon_ids:
taxon = str(taxon)
try:
percentage = taxon_totals[taxon] / tot_unique
except ZeroDivisionError:
warn("""This file has multiple taxa but no unique to taxa peptides.
Please check this experiment
""")
percentage = 1
taxon_totals[taxon] = percentage
print(taxon, ' ratio : ', taxon_totals[taxon])
#logfile.write('{} ratio : {}\n'.format(taxon, taxon_totals[taxon]))
return taxon_totals
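# Hedged worked example (illustrative numbers): if the unique-to-taxon, genecount-normalized AUC sums
# are 8e9 for taxon '9606' and 2e9 for taxon '10090', the returned dict is {'9606': 0.8, '10090': 0.2};
# these percentages are later applied in _distribute_area when a gene has no unique-peptide area.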
def create_df(inputdf, label, inputcol='GeneID'):
"""Create and return a DataFrame with gene/protein information from the input
peptide DataFrame"""
return pd.DataFrame({'GeneID':
list(set(inputdf[inputcol])),
'EXPLabelFLAG': labelflag.get(label, label)})
def select_good_peptides(usrdata, labelix):
"""Selects peptides of a given label with the correct flag and at least one genecount
The LabelFLAG is set here for TMT/iTRAQ/SILAC data.
"""
    temp_df = usrdata[((usrdata['PSM_UseFLAG'] == 1) | (usrdata['AUC_UseFLAG'] == 1)) &
                      (usrdata['GeneIDCount_All'] > 0)].copy() # keep match between runs
temp_df['LabelFLAG'] = labelix
return temp_df
def get_gene_capacity(genes_df, database, col='GeneID'):
"""Get gene capcaity from the stored metadata"""
capacity = (database.groupby('geneid').capacity.mean()
.to_frame(name='GeneCapacity'))
genes_df = genes_df.merge(capacity, how='left', left_on='GeneID', right_index=True)
return genes_df
def get_gene_info(genes_df, database, col='GeneID'):
subset = ['geneid', 'homologene', 'description', 'symbol', 'taxon']
genecapacity = (database.groupby('geneid')['capacity']
.mean()
.rename('capacity_mean')
)
geneinfo = (database[subset]
.drop_duplicates('geneid')
.set_index('geneid')
.join(genecapacity)
.rename(columns=dict(gi = 'ProteinGI',
homologene = 'HomologeneID',
taxon = 'TaxonID',
description = 'Description',
ref = 'ProteinAccession',
symbol = 'GeneSymbol',
capacity_mean = 'GeneCapacity'
))
)
# geneinfo.index = geneinfo.index.astype(str)
# geneinfo['TaxonID'] = geneinfo.TaxonID.astype(str)
out = genes_df.merge(geneinfo, how='left', left_on='GeneID', right_index=True)
return out
def get_peptides_for_gene(genes_df, temp_df):
full = (temp_df.groupby('GeneID')['sequence_lower']
.agg((lambda x: frozenset(x), 'nunique'))
.rename(columns={'<lambda>': 'PeptideSet', 'nunique': 'PeptideCount'})
# .agg(full_op)
.assign(PeptidePrint = lambda x: x['PeptideSet'].apply(sorted).str.join('_'))
)
full['PeptideSet'] = full.apply(lambda x : frozenset(x['PeptideSet']), axis=1)
q_uniq = 'GeneIDCount_All == 1'
q_strict = 'PSM_IDG < 4'
q_strict_u = '{} & {}'.format(q_uniq, q_strict)
try:
uniq = (temp_df.query(q_uniq)
.groupby('GeneID')['sequence_lower']
.agg('nunique')
.to_frame('PeptideCount_u2g'))
except IndexError:
uniq = pd.DataFrame(columns=['PeptideCount_u2g'])
try:
strict = (temp_df.query(q_strict)
.groupby('GeneID')['sequence_lower']
.agg('nunique')
.to_frame('PeptideCount_S'))
except IndexError:
strict = pd.DataFrame(columns=['PeptideCount_S'])
try:
s_u2g = (temp_df.query(q_strict_u)
.groupby('GeneID')['sequence_lower']
.agg('nunique')
.to_frame('PeptideCount_S_u2g'))
except IndexError:
s_u2g = pd.DataFrame(columns=['PeptideCount_S_u2g'])
result = pd.concat((full, uniq, strict, s_u2g), copy=False, axis=1).fillna(0)
ints = ['' + x for x in ('PeptideCount', 'PeptideCount_u2g', 'PeptideCount_S',
'PeptideCount_S_u2g')]
result[ints] = result[ints].astype(np.integer)
genes_df = genes_df.merge(result, how='left',
left_on='GeneID', right_index=True)
return genes_df
def get_psms_for_gene(genes_df, temp_df):
psmflag = 'PSM_UseFLAG'
total = temp_df.groupby('GeneID')[psmflag].sum()
total.name = 'PSMs'
q_uniq = 'GeneIDCount_All == 1'
total_u2g = (temp_df.query(q_uniq)
.groupby('GeneID')[psmflag]
.sum())
total_u2g.name = 'PSMs_u2g'
q_strict = 'PSM_IDG < 4'
total_s = (temp_df.query(q_strict)
.groupby('GeneID')[psmflag]
.sum())
total_s.name = 'PSMs_S'
q_strict_u = '{} & {}'.format(q_uniq, q_strict)
total_s_u2g = (temp_df.query(q_strict_u)
.groupby('GeneID')[psmflag]
.sum())
total_s_u2g.name = 'PSMs_S_u2g'
result = (pd.concat( (total, total_u2g, total_s, total_s_u2g), copy=False, axis=1)
.fillna(0)
.astype(np.integer))
genes_df = genes_df.merge(result, how='left',
left_on='GeneID', right_index=True)
return genes_df
def calculate_full_areas(genes_df, temp_df, area_col, normalize):
""" Calculates full (non distributed ) areas for gene ids.
calculates full, gene count normalized, unique to gene,
and unique to gene with no miscut areas.
"""
qstring = 'AUC_UseFLAG == 1'
full = temp_df.query(qstring).groupby('GeneID')[area_col].sum()/normalize
full.name = 'AreaSum_max'
# full_adj = (temp_df.query(qstring)
# .assign(gpAdj = lambda x: x[area_col] / x['GeneIDCount_All'])
# .groupby('GeneID')['gpAdj'] # temp column
# .sum()/normalize
# )
# full_adj.name = 'AreaSum_gpcAdj'
# qstring_s = qstring + ' & IDG < 4'
# strict = temp_df.query(qstring_s).groupby('GeneID')[area_col].sum()
# strict.name = ''
qstring_u = qstring + ' & GeneIDCount_All == 1'
uniq = temp_df.query(qstring_u).groupby('GeneID')[area_col].sum()/normalize
uniq.name = 'AreaSum_u2g_all'
qstring_u0 = qstring_u + ' & MissedCleavages == 0'
uniq_0 = temp_df.query(qstring_u0).groupby('GeneID')[area_col].sum()/normalize
uniq_0.name = 'AreaSum_u2g_0'
result = pd.concat( (full, uniq, uniq_0), copy=False, axis=1) .fillna(0)
genes_df = genes_df.merge(result, how='left',
left_on='GeneID', right_index=True)
return genes_df
def _distribute_area(inputdata, genes_df, area_col, taxon_totals=None, taxon_redistribute=True):
"""Row based normalization of PSM area (mapped to a specific gene).
Normalization is based on the ratio of the area of unique peptides for the
specific gene to the sum of the areas of the unique peptides for all other genes
that this particular peptide also maps to.
"""
# if inputdata.AUC_UseFLAG == 0:
# return 0
inputvalue = inputdata[area_col]
geneid = inputdata['GeneID']
gene_inputdata = genes_df.query('GeneID == @geneid')
u2g_values = gene_inputdata['AreaSum_u2g_all'].values
if len(u2g_values) == 1:
u2g_area = u2g_values[0] # grab u2g info, should always be
#of length 1
elif len(u2g_values) > 1 :
        warn('{} | DistArea is not singular at GeneID : {}'.format(
            datetime.now(), inputdata['GeneID']))
distArea = 0
# this should never happen (and never has)
else :
distArea = 0
print('No distArea for GeneID : {}'.format(inputdata['GeneID']))
# taxon_ratio = taxon_totals.get(inputdata.gene_taxon_map, 1)
if u2g_area != 0 :
totArea = 0
gene_list = inputdata.GeneIDs_All.split(SEP)
all_u2gareas = (genes_df[genes_df['GeneID'].isin(gene_list)]
.query('PeptideCount_u2g > 0') # all geneids with at least 1 unique pept
.AreaSum_u2g_all)
if len(all_u2gareas) > 1 and any(x == 0 for x in all_u2gareas):
# special case with multiple u2g peptides but not all have areas, rare but does happen
u2g_area = 0 # force to distribute by gene count (and taxon percentage if appropriate)
else:
totArea = all_u2gareas.sum()
distArea = (u2g_area/totArea) * inputvalue
#ratio of u2g peptides over total area
elif all(gene_inputdata.IDSet == 3):
return 0
if u2g_area == 0: # no uniques, normalize by genecount
taxon_percentage = taxon_totals.get(str(inputdata.TaxonID), 1)
distArea = inputvalue
if taxon_percentage < 1:
distArea *= taxon_percentage
gpg_selection = genes_df.GPGroup == gene_inputdata.GPGroup.values[0]
try:
if taxon_redistribute:
taxonid_selection = genes_df.TaxonID == gene_inputdata.TaxonID.values[0]
distArea /= len( genes_df[(gpg_selection) & (taxonid_selection)])
else:
distArea /= len( genes_df[(gpg_selection)
])
except ZeroDivisionError:
pass
return distArea
def distribute_area(temp_df, genes_df, area_col, taxon_totals, taxon_redistribute=True):
"""Distribute psm area based on unique gene product area
Checks for AUC_UseFLAG==1 for whether or not to use each peak for quantification
"""
q = 'AUC_UseFLAG == 1 & GeneIDCount_All > 1'
distarea = 'PrecursorArea_dstrAdj'
temp_df[distarea] = 0
# temp_df[distarea] = (temp_df.query(q)
# .apply(
# _distribute_area, args=(genes_df,
# area_col,
# taxon_totals,
# taxon_redistribute),
# axis=1)
# )
temp_df[distarea] = (temp_df.query(q)
.pipe(apply_by_multiprocessing,
_distribute_area,
workers=WORKERS,
func_args=(genes_df, area_col, taxon_totals, taxon_redistribute),
axis=1)
)
one_id = (temp_df.GeneIDCount_All == 1) & (temp_df.AUC_UseFLAG == 1)
temp_df.loc[ one_id , distarea ] = temp_df.loc[ one_id, area_col ]
# temp_df[distarea].fillna(0, inplace=True)
return
def _set2_or_3(row, genes_df, allsets):
peptset = row.PeptideSet
# allsets = genes_df.PeptideSet.unique() # calculate outside this function for performance boost
if six.PY2 and any(set(peptset) < x for x in allsets):
return 3
elif six.PY3 and any(peptset < allsets):
return 3
# check if is set 3 across multiple genes, or is set2
gid = row.GeneID
# sel = genes_df[ (genes_df.IDSet == 1) &
# (genes_df.PeptideSet & peptset) ].query('GeneID != @gid')
sel = genes_df[(genes_df.PeptideSet & peptset) ].query('GeneID != @gid')
sel_idset1 = sel.query('IDSet == 1')
in_pop = sel.PeptideSet
in_pop_set1 = sel_idset1.PeptideSet
in_row = sel.apply( lambda x: peptset - x['PeptideSet'], axis=1 )
in_pop_all = set(in_pop.apply(tuple).apply(pd.Series).stack().unique())
if not in_pop_set1.empty:
in_pop_all_set1 = set(in_pop_set1.apply(tuple).apply(pd.Series).stack().unique())
else:
in_pop_all_set1 = set()
diff = (peptset - in_pop_all) # check if is not a subset of anything
diff_idset1 = (peptset - in_pop_all_set1) # check if is not a subset of set1 ids
if len( diff_idset1 ) == 0: # is a subset of idset1 ids
return 3
elif len( diff ) > 0: # is not a subset of anything
return 2
else:
sel_not_idset1 = sel.query('IDSet != 1')
if any(sel_not_idset1.PeptideSet == peptset):
return 2 # shares all peptides with another, and is not a subset of anything
# need to check if is a subset of everything combined, but not a subset of one thing
# ex:
# PEPTIDES
# row = A B
# match1 = A
# match2 = B
if (all( (peptset - sel.PeptideSet).apply(bool) ) and
not all( (sel_not_idset1.PeptideSet - peptset).apply(bool) )
):
return 2
else:
pept_lengths = sel_not_idset1.PeptideSet.apply(len)
if len(peptset) >= pept_lengths.max():
return 2
else:
return 3
# len_shared = sel_not_idset1.PeptideSet.apply(lambda x: x & peptset).apply(len)
# max_shared = len_shared.max()
# all_shared_pepts = (set([x for y in sel_not_idset1.PeptideSet.values for x in y])
# & peptset)
return 3
class _DummyDataFrame:
def eat_args(self, *args, **kwargs):
return None
def __getattr__(self, name):
if name not in self.__dict__:
return self.eat_args
def check_length_in_pipe(df):
"""Checks the length of a DataFrame in a pipe
and if zero returns an object to suppress all errors,
just returning None (ideally)
"""
if len(df) == 0:
return _DummyDataFrame()
return df
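# Editor's note: this mirrors its use in assign_gene_sets below, e.g.
#   genes_df.query('PeptideCount_u2g == 0').pipe(check_length_in_pipe).pipe(...)
# so an empty selection silently yields None instead of raising inside the pipe chain.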
def assign_gene_sets(genes_df, temp_df):
all_ = genes_df.PeptideSet.unique()
allsets = genes_df.PeptideSet.unique()
genes_df.loc[genes_df.PeptideCount_u2g > 0, 'IDSet'] = 1
genes_df.loc[genes_df.PeptideCount_u2g == 0, 'IDSet'] = \
(genes_df.query('PeptideCount_u2g == 0')
.pipe(check_length_in_pipe)
# .apply(_set2_or_3, args=(genes_df, allsets), axis=1))
.pipe(apply_by_multiprocessing, _set2_or_3, genes_df=genes_df, allsets=allsets,
axis=1, workers=WORKERS)
)
genes_df['IDSet'] = genes_df['IDSet'].fillna(3).astype(np.int8)
# if u2g count greater than 0 then set 1
gpg = (temp_df.groupby('GeneID')
.PSM_IDG.min()
.rename('IDGroup'))
gpg_u2g = (temp_df.query('GeneIDCount_All==1')
.groupby('GeneID')
.PSM_IDG.min()
.rename('IDGroup_u2g'))
gpgs = (pd.concat([gpg, gpg_u2g], axis=1).fillna(0).astype(np.int8)
.assign(GeneID = lambda x: x.index)
)
    genes_df = pd.merge(genes_df, gpgs, on='GeneID', how='left')  # pandas.merge
# -*- coding: utf-8 -*-
# Loading libraries
import os
import sys
import time
from networkx.algorithms.centrality import group
import pandas as pd
import re
import csv
from swmmtoolbox import swmmtoolbox as swmm
from datetime import datetime
from os import listdir
from concurrent import futures
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session
from sqlalchemy.orm import sessionmaker
import multiprocessing
import pyproj
event_id = input('4-digit event_id like: 0123: ')
model_id = f'model_{event_id}'# + input('4-digit model_id like: 0123: ' )
precipitation_id = f'precipitation_{event_id}'# + input('4-digit raingage_id like 0123: ')
epsg_modelo = input('EPSG (ejemplo: 5348): ')
project_folder = os.path.abspath(os.path.join(os.getcwd(),"../.."))
data_raw_folder = os.path.join(project_folder,'data', 'raw_swmm')
event_folder = os.path.join(data_raw_folder, 'Run_[' + event_id + ']')
model_inp = os.path.join(event_folder, 'model.inp')
model_out = os.path.join(event_folder, 'model.out')
# Connection to database
engine_base_ina = create_engine('postgresql://postgres:[email protected]:5555/base-ina')
RELEVANT_GROUP_TYPES_OUT = [
'link',
'node',
'subcatchment',
# 'system'
]
RELEVANT_GROUP_TYPES_INP = [
'coordinates',
'subcatchments',
'raingages',
'conduits',
'orifices',
'weirs',
'outfalls',
# 'vertices',
# 'polygons',
'subareas',
# 'losses',
'infiltration',
'junctions',
'storage',
# 'properties',
# "curves",
]
RELEVANT_LINKS = [
# 'channel10944',
# 'channel24416',
# 'channel60443',
# 'channel17459',
# 'channel87859',
# 'channel14380',
# 'channel55414',
# 'channel77496',
# 'channel83013',
# 'channel52767',
# 'channel12818',
# 'conduit11698',
# 'channel6317',
# 'conduit18801',
# 'conduit50317',
# 'conduit528',
# 'conduit36611',
# 'conduit50827',
# 'conduit78108',
# 'conduit57848',
# 'conduit42638',
# 'conduit34157',
# 'conduit29340',
# 'conduit19715',
# 'conduit23023',
# 'conduit37130',
# 'conduit21772',
# 'channel52598',
# 'conduit75783',
# 'conduit62715',
# 'conduit48979',
# 'conduit82544',
# 'conduit83110',
# 'conduit33678',
# 'conduit18303',
# 'conduit40724',
# 'conduit13927'
]
RELEVANT_SUBCATCHMENTS = []
RELEVANT_NODES = []
RELEVANT_SUBAREAS = []
RELEVANT_OUTFALLS = []
RELEVANT_VERTICES = []
RELEVANT_POLYGNOS = []
RELEVANT_LINKS_CONDUITS = []
RELEVANT_LINKS_ORIFICES = []
RELEVANT_LINKS_WEIRS = []
RELEVANT_LOSSES = []
RELEVANT_INFILTRATION = []
RELEVANT_JUNCTIONS = []
RELEVANT_STORAGE = []
MODEL_OUT_COLS = {
'SUBCATCHMENTS_COLS' : [
'event_id',
'elapsed_time',
'subcatchment_id',
'rainfall',
'elapsed_time',
'snow_depth',
'evaporation_loss',
'infiltration_loss',
'runoff_rate',
'groundwater_outflow',
'groundwater_elevation',
'soil_moisture'
],
'LINKS_COLS' : [
'event_id',
'elapsed_time',
'link_id',
'flow_rate',
'flow_depth',
'flow_velocity',
'froude_number',
'capacity'
],
'NODES_COLS' : [
'event_id',
'elapsed_time',
'node_id',
'depth_above_invert',
'hydraulic_head',
'volume_stored_ponded',
'lateral_inflow',
'total_inflow',
'flow_lost_flooding'
]
}
MODEL_INP_COLS = {
'NODES_COORDINATES' : [
'node_id',
'x_coord',
'y_coord',
],
"SUBCATCHMENTS" : [
"subcatchment_id",
"raingage_id",
"outlet",
"area",
"imperv",
"width",
"slope",
"curb_len"
],
"LINKS_CONDUITS" : [
"conduit_id",
"from_node",
"to_node",
"length",
"roughness",
"in_offset",
"out_offset",
"init_flow",
"max_flow"
],
"LINKS_ORIFICES" : [
"orifice_id",
"from_node",
"to_node",
"type",
"offset",
"q_coeff",
"gated",
"close_time"
],
"LINKS_WEIRS" : [
"weir_id",
"from_node",
"to_node",
"type",
"crest_ht",
"q_coeff",
"gated",
"end_con",
"end_coeff",
"surcharge"
],
"SUBAREAS" : [
"subcatchment_id",
"n_imperv",
"n_perv",
"s_imperv",
"s_perv",
"pct_zero",
"route_to"
],
"NODES_STORAGE" : [
"storage_id",
"elevation",
"max_depth",
"init_depth",
"shape",
"curve_name_params",
"n_a",
"f_evap"
],
"NODES_OUTFALLS" : [
"outfall_id",
"elevation",
"type",
# "stage_data",
"gated",
# "route_to"
],
"NODES_JUNCTIONS" : [
"junction_id",
"elevation",
"max_depth",
"init_depth",
"sur_depth",
"aponded"
],
"INFILTRATION": [
"subcatchment_id",
"max_rate",
"min_rate",
"decay",
"dry_time",
"max_infil",
],
# "POLYGONS": [
# "subcatchment_id",
# "x_coord",
# "y_coord"
# ],
# "VERICES": [
# "link_id",
# "x_coord",
# "y_coord"
# ],
"PROPERTIES": [
"model_name",
"model_version",
"flow_units",
"infiltration",
"flow_routing",
"link_offsets",
"min_slope",
"allow_ponding",
"skip_steady_state",
"start_date",
"start_time",
"report_start_date",
"report_start_time",
"end_date",
"end_time",
"sweep_start",
"sweep_end",
"report_step",
"wet_step",
"dry_step",
"routing_step",
"inertial_damping",
"normal_flow_limited",
"force_main_equation",
"variable_step",
"lengthening_step",
"min_surfarea",
"max_trials",
"head_tolerance",
"sys_flow",
"lat_flow_tol",
"minimum_step",
"threads"
]
}
# dictionary to store data
groups = {}
# Definition of starting position of each element
def group_start_line(model):
with open(model, 'r') as inp:
groups = {}
count = 0
lines = inp.readlines()
for line in lines:
if ('[' in line) & (']' in line):
groups.update({line[1:-2].lower() : {'start': count}})
count += 1
# subselection of elements from MODEL_ELEMENTS
groups = {key:value for key,value in groups.items() if key in RELEVANT_GROUP_TYPES_INP}
LINK_TYPES = ['orifices', 'conduits', 'weirs']
NODE_TYPES = ['outfalls', 'junctions', 'storage']
for key in [key for key in groups.keys() if key in LINK_TYPES]:
groups['links_' + key] = groups.pop(key)
for key in [key for key in groups.keys() if key in NODE_TYPES]:
groups['nodes_' + key] = groups.pop(key)
groups['nodes_coordinates'] = groups.pop('coordinates')
return groups
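# Editor's illustration (line numbers hypothetical): group_start_line returns a dict
# keyed by the renamed sections, e.g.
#   {'subcatchments': {'start': 120}, 'links_conduits': {'start': 450},
#    'nodes_coordinates': {'start': 900}, ...}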
# adding header and skip-lines to elements dict
def build_groups_dicts(model):
groups = group_start_line(model)
count = 0
for element, start_dict in groups.items():
start = start_dict['start']
with open(model, 'r') as inp:
lines = inp.readlines()
for index, line in enumerate(lines):
if (index - start == 1) & (';;' in line) & (';;--' not in line):
groups[element].update({'header':[col for col in re.split("\s\s+", line[2:-1]) if len(col) > 1]})
elif (index - start == 2) & (';;--------------' in line):
groups[element].update({'line_to_skip': index})
elif (index - start == 3):
break
    # some corrections on header because of mismatches in the inp file
# groups['properties'].update({'header': MODEL_INP_COLS['PROPERTIES']})
groups['subcatchments'].update({'header': MODEL_INP_COLS['SUBCATCHMENTS']})
groups['subareas'].update({'header': MODEL_INP_COLS['SUBAREAS']})
groups['infiltration'].update({'header': MODEL_INP_COLS['INFILTRATION']})
groups['links_conduits'].update({'header': MODEL_INP_COLS['LINKS_CONDUITS']})
groups['links_weirs'].update({'header': MODEL_INP_COLS['LINKS_WEIRS']})
groups['links_orifices'].update({'header': MODEL_INP_COLS['LINKS_ORIFICES']})
groups['nodes_coordinates'].update({'header': MODEL_INP_COLS['NODES_COORDINATES']})
groups['nodes_outfalls'].update({'header': MODEL_INP_COLS['NODES_OUTFALLS']})
groups['nodes_storage'].update({'header': MODEL_INP_COLS['NODES_STORAGE']})
groups['nodes_junctions'].update({'header': MODEL_INP_COLS['NODES_JUNCTIONS']})
return groups
# %%
def list_files(directory, extension, prefix):
return (f for f in listdir(directory) if (f.endswith('.' + extension)) & (f.startswith(prefix)))
def raingages_meta_to_dfs(model, model_id):
""" Read a .CSV into a Pandas DataFrame until a blank line is found, then stop.
"""
start = build_groups_dicts(model)['raingages']['start']
skip_rows = build_groups_dicts(model)['raingages']['line_to_skip']
header = ['raingage_id', 'format', 'interval', 'unit']
df = pd.DataFrame()
with open(model, newline='') as f:
contents = []
r = csv.reader(f)
for i, line in enumerate(r):
if i > start + 1:
if i != skip_rows:
if not line:
break
# elif i == start + 1:
# headers = line
else:
formatted_line = [line[0].split()[0], line[0].split()[1], line[0].split()[2],line[0].split()[7]]
contents.append(formatted_line)
df = pd.DataFrame(data = contents, columns= header,)
df['interval'] = df['interval'].map( lambda x: datetime.strptime(x, '%H:%M'))
df.insert(0, 'precipitation_id', precipitation_id)
print('raingages','df created!')
return df
def date_parser(line):
year = line[0].split()[1]
month = line[0].split()[2].zfill(2)
day = line[0].split()[3].zfill(2)
hour = line[0].split()[4].zfill(2)
minute = line[0].split()[5].zfill(2)
str_date = '-'.join([year, month, day, hour, minute] )
date_format = '%Y-%m-%d-%H-%M'
return datetime.strptime(str_date, date_format)
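# Editor's illustration (hypothetical raingage line, whitespace-separated):
#   date_parser(['P1 2018 3 14 09 30 4.2']) -> datetime(2018, 3, 14, 9, 30)
# column 6 of the same line (here 4.2) is the rainfall value read separately
# in raingages_to_df.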
# %%
def raingages_to_df(event_folder, event_id, model, model_id):
contents = []
for file in list_files(event_folder, 'txt', 'P'):
raingage_id = file.split('.')[0]
with open(os.path.join(event_folder, file), newline='') as f:
r = csv.reader(f)
for i, line in enumerate(r):
try:
formatted_line = [
raingage_id,
date_parser(line),
line[0].split()[6]
]
contents.append(formatted_line)
                except Exception:
                    print('error parsing line {} in {}'.format(i, file))
df_timeseries = pd.DataFrame(data = contents, columns= ['raingage_id', 'elapsed_time', 'value'])
df_timeseries.insert(0, 'precipitation_id', precipitation_id)
df_metadata = raingages_meta_to_dfs(model, model_id)
return df_metadata, df_timeseries
# %%
def load_raingages_to_db(event_folder, event_id, model, model_id):
raingage_metadata, raingage_timeseries = raingages_to_df(event_folder, event_id, model, model_id)
table_metadata = 'raingages_metadata'
table_timeseries = 'raingages_timeseries'
try:
raingage_metadata.to_sql(table_metadata, engine_base_ina, index=False, if_exists='append')
except Exception as e:
print(e)
try:
raingage_timeseries.to_sql(table_timeseries, engine_base_ina, index=False, if_exists='append')
except Exception as e:
print(e)
# def group_type_to_dfs(model, model_id, group, id_col, col_to_check, own_relevant__list, relevant_dependent_list):
# """ Read a .CSV into a Pandas DataFrame until a blank line is found, then stop.
# """
# start = build_groups_dicts(model)[group]['start']
# skip_rows = build_groups_dicts(model)[group]['line_to_skip']
# header = build_groups_dicts(model)[group]['header']
# global own_relevant__list
# own_relevant_list = []
# df = pd.DataFrame()
# with open(model, newline='') as f:
# contents = []
# r = csv.reader(f)
# for i, line in enumerate(r):
# if i >= start + 1:
# if i != skip_rows:
# if not line:
# break
# # elif i == start + 1:
# # headers = line
# else:
# if len(relevant_dependecy_list) == 0:
# own_relevant__list.append(line[0].split()[id_col])
# contents.append(line[0].split())
# else:
# if line[0].split()[col_to_check].lower() in relevant_dependent_list:
# own_relevant__list.append(line[0].split()[id_col])
# contents.append(line[0].split())
# df = pd.DataFrame(data = contents, columns= [col.lower().replace("-", "_").replace("%", "").replace(" ", "_") for col in header],)
# df.insert(0, 'model_id', model_id)
# print(group,'df created!')
# return df
def conduits_to_dfs(model, model_id):
""" Read a .CSV into a Pandas DataFrame until a blank line is found, then stop.
"""
start = build_groups_dicts(model)['links_conduits']['start']
skip_rows = build_groups_dicts(model)['links_conduits']['line_to_skip']
header = build_groups_dicts(model)['links_conduits']['header']
global RELEVANT_LINKS_CONDUITS
RELEVANT_LINKS_CONDUITS = []
df = pd.DataFrame()
with open(model, newline='') as f:
contents = []
r = csv.reader(f)
for i, line in enumerate(r):
if i > start + 1:
if i != skip_rows:
if not line:
break
# elif i == start + 1:
# headers = line
else:
if len(RELEVANT_LINKS) == 0:
contents.append(line[0].split())
else:
if line[0].split()[0].lower() in RELEVANT_LINKS:
RELEVANT_LINKS_CONDUITS.append(line[0].split()[0])
contents.append(line[0].split())
df = pd.DataFrame(data = contents, columns= [col.lower().replace("-", "_").replace("%", "").replace(" ", "_") for col in header],)
df.insert(0, 'model_id', model_id)
print('conduits','df created!')
return df
def weirs_to_dfs(model, model_id):
""" Read a .CSV into a Pandas DataFrame until a blank line is found, then stop.
"""
start = build_groups_dicts(model)['links_weirs']['start']
skip_rows = build_groups_dicts(model)['links_weirs']['line_to_skip']
header = build_groups_dicts(model)['links_weirs']['header']
global RELEVANT_LINKS_WEIRS
RELEVANT_LINKS_WEIRS = []
df = pd.DataFrame()
with open(model, newline='') as f:
contents = []
r = csv.reader(f)
for i, line in enumerate(r):
if i > start + 1:
if i != skip_rows:
if not line:
break
# elif i == start + 1:
# headers = line
else:
if len(RELEVANT_LINKS) == 0:
contents.append(line[0].split())
else:
if line[0].split()[0].lower() in RELEVANT_LINKS:
RELEVANT_LINKS_WEIRS.append(line[0].split()[0])
contents.append(line[0].split())
df = pd.DataFrame(data = contents, columns= [col.lower().replace("-", "_").replace("%", "").replace(" ", "_") for col in header],)
df.insert(0, 'model_id', model_id)
print('weirs','df created!')
return df
def orifices_to_dfs(model, model_id):
""" Read a .CSV into a Pandas DataFrame until a blank line is found, then stop.
"""
start = build_groups_dicts(model)['links_orifices']['start']
skip_rows = build_groups_dicts(model)['links_orifices']['line_to_skip']
header = build_groups_dicts(model)['links_orifices']['header']
global RELEVANT_LINKS_ORIFICES
RELEVANT_LINKS_ORIFICES = []
df = pd.DataFrame()
with open(model, newline='') as f:
contents = []
r = csv.reader(f)
for i, line in enumerate(r):
if i > start + 1:
if i != skip_rows:
if not line:
break
# elif i == start + 1:
# headers = line
else:
if len(RELEVANT_LINKS) == 0:
contents.append(line[0].split())
else:
if line[0].split()[0].lower() in RELEVANT_LINKS:
RELEVANT_LINKS_ORIFICES.append(line[0].split()[0])
contents.append(line[0].split())
df = pd.DataFrame(data = contents, columns= [col.lower().replace("-", "_").replace("%", "").replace(" ", "_") for col in header],)
df.insert(0, 'model_id', model_id)
print('orifices','df created!')
return df
def get_nodes_from_links(model, model_id):
conduits_df = conduits_to_dfs(model, model_id)
orifices_df = orifices_to_dfs(model, model_id)
weirs_df = weirs_to_dfs(model, model_id)
links_dfs = [
conduits_df,
orifices_df,
weirs_df
]
nodes = []
for df in links_dfs:
for col in [col for col in df.columns if 'node' in col]:
nodes += df[col].unique().tolist()
return nodes
# coordinate system conversion
def convert_coords(coord_tuple):
transformer = pyproj.Transformer.from_crs(crs_from='epsg:' + epsg_modelo, crs_to='epsg:4326')
lon, lat = transformer.transform(coord_tuple[0], coord_tuple[1])
return (lon,lat)
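# Editor's usage sketch (coordinate values hypothetical): with epsg_modelo = '5348',
#   convert_coords((5600000, 6100000))
# returns the transformed coordinate pair in EPSG:4326 (named (lon, lat) in the code)
# via pyproj's Transformer.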
def nodes_to_dfs(model, model_id):
""" Read a .CSV into a Pandas DataFrame until a blank line is found, then stop.
"""
global RELEVANT_NODES
RELEVANT_NODES = get_nodes_from_links(model, model_id)
start = build_groups_dicts(model)['nodes_coordinates']['start']
skip_rows = build_groups_dicts(model)['nodes_coordinates']['line_to_skip']
header = build_groups_dicts(model)['nodes_coordinates']['header']
df = pd.DataFrame()
with open(model, newline='') as f:
contents = []
r = csv.reader(f)
for i, line in enumerate(r):
if i > start + 1:
if i != skip_rows:
if not line:
break
# elif (i == start + 1):
# headers = line
else:
if len(RELEVANT_NODES) == 0:
contents.append(line[0].split())
else:
if line[0].split()[0] in RELEVANT_NODES:
contents.append(line[0].split())
df = pd.DataFrame(data = contents, columns= [col.lower().replace("-", "_").replace("%", "").replace(" ", "_") for col in header],)
df.insert(0, 'model_id', model_id)
cols =['lat', 'lon']
coords = []
coordinates = [(j[0], j[1]) for i,j in df[['x_coord', 'y_coord']].iterrows()]
pool = multiprocessing.Pool(8)
coords.append(pool.map(convert_coords, coordinates))
pool.close()
pool.join()
# for i in df[['x_coord', 'y_coord']].iterrows():
# coords.append(convert_coords(i[1]))
# from pyproj import Transformer
# def convert_coords(coord_tuple):
# global coords
# transformer = Transformer.from_crs(crs_from='epsg:5348' , crs_to='epsg:4326')
# lon, lat = transformer.transform(coord_tuple[0], coord_tuple[1])
# coords.append((lon, lat, coord_tuple[2]))
# return coords
# import concurrent.futures
# coords = []
# with concurrent.futures.ProcessPoolExecutor(max_workers=8) as executor:
# for result in executor.map(convert_coords, [(i[1], i[2], i[3]) for i in coordinates]):
# pass
# coords = result
df = pd.concat([df, pd.DataFrame(coords[0], columns=cols)], axis=1)
print('nodes','df created!')
return df
def outfalls_to_dfs(model, model_id):
""" Read a .CSV into a Pandas DataFrame until a blank line is found, then stop.
"""
global RELEVANT_NODES
RELEVANT_NODES = get_nodes_from_links(model, model_id)
start = build_groups_dicts(model)['nodes_outfalls']['start']
skip_rows = build_groups_dicts(model)['nodes_outfalls']['line_to_skip']
header = build_groups_dicts(model)['nodes_outfalls']['header']
    df = pd.DataFrame()  # pandas.DataFrame
import pandas as pd
import numpy as np
import os
import datetime
import requests
from tqdm import tqdm
from collections import Counter
import joblib
import os
# TODO: re-implement sucking data from the internet by checking for all days
# and sucking only what it needs and put that in the load_data module
# so it automatically happens whenever you load the data, rather
# than having to manually do it here.
#####
# Step 1: Update counts data
#####
# from https://github.com/CSSEGISandData/COVID-19/tree/master/csse_covid_19_data/csse_covid_19_daily_reports
# # get today's date
# yesterdays_date_str = (datetime.date.today() - datetime.timedelta(days=1)).strftime('%Y-%m-%d')
# print(f'Yesterday: {yesterdays_date_str}')
# yesterdays_date_str_for_JHU_data = (datetime.date.today() - datetime.timedelta(days=1)).strftime('%m-%d-%Y')
# print(f'Yesterday: {yesterdays_date_str}')
#
# url = f"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/{yesterdays_date_str_for_JHU_data}.csv"
# r = requests.get(url, allow_redirects=True)
# with open(f'source_data/csse_covid_19_daily_reports/{yesterdays_date_str_for_JHU_data}.csv', 'w') as f:
# f.write(r.content.decode("utf-8"))
if not os.path.exists('loaded_data'):
os.mkdir('loaded_data')
today_str = datetime.datetime.today().strftime('%Y-%m-%d')
loaded_data_filename = os.path.join('loaded_data', today_str) + '.joblib'
success = False
try:
print(f'Loading {loaded_data_filename}...')
tmp_dict = joblib.load(loaded_data_filename)
map_state_to_series = tmp_dict['map_state_to_series']
current_cases_ranked_us_states = tmp_dict['current_cases_ranked_us_states']
current_cases_ranked_non_us_states = tmp_dict['current_cases_ranked_non_us_states']
current_cases_ranked_non_us_provinces = tmp_dict['current_cases_ranked_non_us_provinces']
current_cases_ranked_us_counties = tmp_dict['current_cases_ranked_us_counties']
map_state_to_current_case_cnt = tmp_dict['map_state_to_current_case_cnt']
map_state_to_fips = tmp_dict['map_state_to_fips']
print('...done!')
success = True
except:
print('...loading failed!')
if not success:
# don't download on the server
if os.environ['PWD'] != '/home/data/src/covid_model':
url = "https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-states.csv"
r = requests.get(url, allow_redirects=True)
with open('source_data/states.csv', 'w') as f:
f.write(r.content.decode("utf-8"))
url = "https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv"
r = requests.get(url, allow_redirects=True)
with open('source_data/counties.csv', 'w') as f:
f.write(r.content.decode("utf-8"))
print('Downloading last month of data if not available')
for days_back in tqdm(range(1, 28)):
date = datetime.date.today() - datetime.timedelta(days=days_back)
date_str = date.strftime('%m-%d-%Y')
url = f"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/{date_str}.csv"
filename = f'source_data/csse_covid_19_daily_reports/{date_str}.csv'
if not os.path.exists(filename):
r = requests.get(url, allow_redirects=True)
print(filename, len(r.content.decode("utf-8")))
with open(filename, 'w') as f:
f.write(r.content.decode("utf-8"))
#####
# Step 1: Get US Data States
#####
print('Processing U.S. States...')
data_dir = 'source_data'
us_full_count_data = pd.read_csv(os.path.join(data_dir, 'states.csv'))
# from https://github.com/nytimes/covid-19-data
# curl https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-states.csv
us_full_count_data['date'] = us_full_count_data['date'].astype('datetime64[ns]')
us_full_count_data['state_orig'] = us_full_count_data['state']
us_full_count_data['state'] = [f'US: {us_full_count_data.iloc[i]["state"]}' for i in range(len(us_full_count_data))]
us_full_count_data.rename(columns={'cases': 'positive', 'deaths': 'deceased'},
inplace=True)
quick_grab_tuples = list(
set(zip(*[us_full_count_data[col] for col in ['state', 'state_orig', 'fips']])))
map_state_to_fips = {tmp_tuple[0]: tmp_tuple[2] for tmp_tuple in quick_grab_tuples}
us_full_count_data = us_full_count_data[['date', 'state', 'positive', 'deceased']]
# get totals across U.S.
list_of_dict_totals = list()
for date in sorted(set(us_full_count_data['date'])):
date_iloc = [i for i, x in enumerate(us_full_count_data['date']) if x == date]
sum_cases = sum(us_full_count_data.iloc[date_iloc]['positive'])
sum_deaths = sum(us_full_count_data.iloc[date_iloc]['deceased'])
list_of_dict_totals.append({'date': date, 'positive': sum_cases, 'deceased': sum_deaths, 'state': 'US: total'})
us_total_counts_data = pd.DataFrame(list_of_dict_totals)
us_full_count_data = us_full_count_data.append(us_total_counts_data, ignore_index=True)
us_states = sorted(set(us_full_count_data['state']))
#####
# Step 1b: Get US Data Counties
#####
print('Processing U.S. Counties...')
data_dir = 'source_data'
us_county_full_data = pd.read_csv(os.path.join(data_dir, 'counties.csv'))
# from https://github.com/nytimes/covid-19-data
# curl https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv
us_county_full_data['date'] = us_county_full_data['date'].astype('datetime64[ns]')
us_county_full_data['state_orig'] = us_county_full_data['state']
us_county_full_data['state'] = [
f'US: {state}: {county}' for state, county in zip(us_county_full_data['state'], us_county_full_data['county'])]
quick_grab_tuples = list(
set(zip(*[us_county_full_data[col] for col in ['state', 'state_orig', 'county', 'fips']])))
tmp_map_state_to_fips = {tmp_tuple[0]: tmp_tuple[3] for tmp_tuple in quick_grab_tuples}
map_state_to_fips.update(tmp_map_state_to_fips)
us_county_full_data.rename(columns={'cases': 'positive', 'deaths': 'deceased'},
inplace=True)
us_county_full_data = us_county_full_data[['date', 'state', 'positive', 'deceased']]
us_counties = sorted(set(us_county_full_data['state']))
us_full_count_data = pd.concat([us_full_count_data, us_county_full_data])
######
# Step 2a: Get International Data Nations
######
print('Processing non-U.S. Nations...')
data_dir = os.path.join('source_data', 'csse_covid_19_daily_reports')
onlyfiles = [f for f in os.listdir(data_dir) if os.path.isfile(os.path.join(data_dir, f))]
list_of_small_dataframes = list()
for file in tqdm(sorted(onlyfiles)):
if not file.endswith('.csv'):
continue
full_filename = os.path.join(data_dir, file)
tmp_count_data = pd.read_csv(os.path.join(data_dir, file))
tmp_count_data.rename(columns={'Country_Region': 'Country/Region', 'Province_State': 'Province/State'},
inplace=True)
print(f'processing file {full_filename} with {len(tmp_count_data)} rows...')
tmp_count_data['date'] = datetime.datetime.strptime(file[:-4], '%m-%d-%Y')
list_of_small_dataframes.append(tmp_count_data)
# Combine daily files, then aggregate counts by country (the old province filter below is disabled)
full_count_data = pd.concat(list_of_small_dataframes)
# null_provice_inds = [i for i, x in enumerate(full_count_data['Province/State']) if type(x) != str]
# full_count_data = full_count_data.iloc[null_provice_inds]
full_count_data = full_count_data.groupby(['date', 'Country/Region'])[['Confirmed', 'Deaths']].sum().reset_index()
full_count_data.rename(columns={'Country/Region': 'state', 'Confirmed': 'positive', 'Deaths': 'deceased'},
inplace=True)
# get totals across U.S. (again)
# us_total_counts_data['state'] = 'United States'
# full_count_data = full_count_data.append(us_total_counts_data, ignore_index=True)
non_us_countries = sorted(set(full_count_data['state']))
full_count_data = pd.concat([full_count_data, us_full_count_data])
######
# Step 2b: Get International Data Provinces
######
print('Processing non-U.S. Provinces...')
data_dir = os.path.join('source_data', 'csse_covid_19_daily_reports')
onlyfiles = [f for f in os.listdir(data_dir) if os.path.isfile(os.path.join(data_dir, f))]
list_of_small_dataframes = list()
for file in tqdm(sorted(onlyfiles)):
if not file.endswith('.csv'):
continue
full_filename = os.path.join(data_dir, file)
tmp_count_data = pd.read_csv(os.path.join(data_dir, file))
tmp_count_data.rename(columns={'Country_Region': 'Country/Region', 'Province_State': 'Province/State'},
inplace=True)
print(f'processing file {full_filename} with {len(tmp_count_data)} rows...')
tmp_count_data['date'] = datetime.datetime.strptime(file[:-4], '%m-%d-%Y')
list_of_small_dataframes.append(tmp_count_data)
# Combine daily files, then aggregate counts by country and province
province_full_data = pd.concat(list_of_small_dataframes)
# null_provice_inds = [i for i, x in enumerate(full_count_data['Province/State']) if type(x) != str]
# full_count_data = full_count_data.iloc[null_provice_inds]
province_full_data = province_full_data.groupby(['date', 'Country/Region', 'Province/State'])[
['Confirmed', 'Deaths']].sum().reset_index()
province_full_data['state'] = [f'{country}: {province}' for country, province in
zip(province_full_data['Country/Region'], province_full_data['Province/State'])]
province_full_data.rename(columns={'Confirmed': 'positive', 'Deaths': 'deceased'},
inplace=True)
# get totals across U.S. (again)
# us_total_counts_data['state'] = 'United States'
# full_count_data = full_count_data.append(us_total_counts_data, ignore_index=True)
non_us_provinces = sorted(set(province_full_data['state']))
full_count_data = pd.concat([full_count_data, province_full_data])
#####
# Step 4: Further processing, rendering dictionaries
#####
map_state_to_series = dict()
max_date = max(full_count_data['date']) - datetime.timedelta(days=2)
date_inds = [i for i, x in enumerate(full_count_data['date']) if x == max_date]
today_data = full_count_data.iloc[date_inds]
map_state_to_current_case_cnt = {state: cases for state, cases in zip(today_data['state'], today_data['positive'])}
current_cases_ranked_us_states = sorted(us_states, key=lambda x: -map_state_to_current_case_cnt.get(x, 0))
current_cases_ranked_us_counties = sorted(us_counties, key=lambda x: -map_state_to_current_case_cnt.get(x, 0))
current_cases_ranked_non_us_states = sorted(non_us_countries,
key=lambda x: -map_state_to_current_case_cnt.get(x, 0))
current_cases_ranked_non_us_provinces = sorted(non_us_provinces,
key=lambda x: -map_state_to_current_case_cnt.get(x, 0))
# germany_inds = [i for i, x in enumerate(full_count_data['country']) if x == 'France']
# date_sorted_inds = sorted(germany_inds, key=lambda x: full_count_data.iloc[x]['date'])
# full_count_data.iloc[date_sorted_inds[-10:]]
# build reverse index for states, since this computation is expensive
map_state_to_iloc = dict()
for iloc, state in enumerate(full_count_data['state']):
map_state_to_iloc.setdefault(state, list()).append(iloc)
print('Processing states, counties, and provinces...')
# data munging gets daily differences by state
for state in tqdm(sorted(set(full_count_data['state']))):
state_iloc = map_state_to_iloc[state]
state_iloc = sorted(state_iloc, key=lambda x: full_count_data.iloc[x]['date'])
cases_series = pd.Series(
{date: x for date, x in
zip(full_count_data.iloc[state_iloc]['date'], full_count_data.iloc[state_iloc]['positive'])})
deaths_series = pd.Series(
{date: x for date, x in
zip(full_count_data.iloc[state_iloc]['date'], full_count_data.iloc[state_iloc]['deceased'])})
cases_series.index = pd.DatetimeIndex(cases_series.index)
    deaths_series.index = pd.DatetimeIndex(deaths_series.index)  # pandas.DatetimeIndex
import pandas as pd
import numpy as np
import pandas
import csv
import ast
from sklearn.tree import DecisionTreeClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.linear_model import LogisticRegression
from sklearn import svm
from sklearn.metrics import f1_score,confusion_matrix
from sklearn.metrics import precision_score, recall_score, cohen_kappa_score , accuracy_score
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import GradientBoostingClassifier,AdaBoostClassifier
import pickle
input_feat =[0,1,2,3,4,5,6,7,8]
# input_feat = [0,1,2,3,4]
# input_feat = [5,6,7,8]
output_feat = [9]
no_estimators=65
def is_valid_python_file(contents):
try:
ast.parse(contents)
return True
except SyntaxError:
return False
def mean(a):
return sum(a)/len(a)
def basic_model(df):
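    # Trains a bagging ensemble (no_estimators = 65 decision trees) on the columns
    # selected by input_feat/output_feat; the commented alternatives (MLP, SVM,
    # logistic regression, AdaBoost) appear to be earlier experiments.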
train_x = df.iloc[:,input_feat]
train_y = df.iloc[:,output_feat]
train_x =train_x.values
train_y =train_y.values
# clf = MLPClassifier(solver='lbfgs' , alpha=1e-5,hidden_layer_sizes=(100,50,2), random_state=1).fit(train_x,train_y)
# clf=svm.SVC(kernel='rbf').fit(train_x,train_y)
clf=DecisionTreeClassifier().fit(train_x,train_y)
# clf=LogisticRegression(solver='lbfgs')
model = BaggingClassifier(base_estimator=clf, n_estimators=no_estimators, random_state=7)
# model = AdaBoostClassifier(base_estimator=clf, n_estimators=no_estimators, learning_rate=5)
model=model.fit(train_x,train_y)
return model
df1=pd.read_csv("MainTable.csv")
df2=pd.read_csv("CodeState.csv")
df_merged_code=pd.merge(df1,df2,on="CodeStateID")
df_merged_code=df_merged_code.rename(columns={"Order" : "StartOrder"})
def add_features_basic(df_train):
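    # Adds per-student prior-performance features (p_prior_correct, p_prior_completed,
    # prior_attempts), syntax/semantic error flags derived from ast.parse, and running
    # per-student error rates (pSubjectSyntaxErrors, pSubjectSemanticErrors).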
df_train = df_train.sort_values(by=['SubjectID'])
prev_student = None
p_prior_correct = []
p_prior_completed = []
prior_attempts = []
for index, rows in df_train.iterrows():
curr_student = rows['SubjectID']
if(prev_student != curr_student):
attempts = 0
first_correct_attempts = 0
completed_attempts = 0
prev_student = curr_student
if(attempts > 0):
p_prior_correct.append(first_correct_attempts/attempts)
p_prior_completed.append(completed_attempts/attempts)
prior_attempts.append(attempts)
else:
p_prior_correct.append(1/2.0)
p_prior_completed.append(1/2.0)
prior_attempts.append(0)
if(rows['FirstCorrect']==True):
first_correct_attempts+=1
if(rows['EverCorrect']==True):
completed_attempts+=1
attempts+=1
df_train['p_prior_correct'] = p_prior_correct
df_train['p_prior_completed'] = p_prior_completed
df_train['prior_attempts'] = prior_attempts
is_syntax_error = []
has_fname_error=[]
for index, rows in df_train.iterrows():
fname=rows["ProblemID"]
if(df_train[index:index+1]['Code'].isna().sum()==1):
is_syntax_error.append(True)
continue
x = is_valid_python_file(rows['Code'])
if(x == False):
is_syntax_error.append(True)
else:
is_syntax_error.append(False)
df_train['is_syntax_error'] = is_syntax_error
is_semantic_error = []
for index, rows in df_train.iterrows():
if(rows['is_syntax_error'] == True):
is_semantic_error.append('NA')
elif(rows['is_syntax_error'] == False and rows['Correct'] == False):
is_semantic_error.append(True)
else:
is_semantic_error.append(False)
df_train['is_semantic_error'] = is_semantic_error
df_train=df_train.sort_values(["SubjectID"])
prev_student = None
p_syntax_errors = []
p_semantic_errors = []
for index, rows in df_train.iterrows():
curr_student = rows['SubjectID']
if(prev_student != curr_student):
num_syntax_errors = 0
num_semantic_errors = 0
total_attempts = 0
prev_student = curr_student
if(total_attempts == 0):
p_syntax_errors.append(1.0/3)
p_semantic_errors.append(1.0/3)
if(rows['is_syntax_error'] == True):
num_syntax_errors = num_syntax_errors + 1
if(rows['is_semantic_error'] == True):
num_semantic_errors=num_semantic_errors + 1
total_attempts+=1
else:
p_semantic_errors.append(num_semantic_errors/total_attempts)
p_syntax_errors.append(num_syntax_errors/total_attempts)
if(rows['is_syntax_error'] == True):
num_syntax_errors = num_syntax_errors + 1
if(rows['is_semantic_error'] == True):
num_semantic_errors=num_semantic_errors + 1
total_attempts+=1
df_train['pSubjectSyntaxErrors'] = p_syntax_errors
df_train['pSubjectSemanticErrors'] = p_semantic_errors
return df_train
accuracy_list=[]
f1_score_list=[]
precision_score_list=[]
kappa_score_list=[]
recall_score_list=[]
tp=[]
fp=[]
fn=[]
tn=[]
frames=[]
for i in range(10):
print("Fold=\t",i)
print("\n")
df_train=pd.read_csv("CV/Fold"+ str(i) + "/Training.csv")
df_test =pd.read_csv("CV/Fold" + str(i) + "/Test.csv")
df_train=pd.merge(df_merged_code,df_train,on=["StartOrder","SubjectID","ProblemID"])
df_test=pd.merge(df_merged_code,df_test,on=["StartOrder","SubjectID","ProblemID"])
df_train = df_train.replace(np.nan, '', regex=True)
df_test = df_test.replace(np.nan, '', regex=True)
df_pcorrect=df_train.groupby("ProblemID",as_index=False)["FirstCorrect"].mean()
df_pcorrect=df_pcorrect.rename(columns={"FirstCorrect" : "Pcorrectforproblem"})
df_train=pd.merge(df_pcorrect,df_train,on=["ProblemID"])
df_pmedian = df_train.groupby("ProblemID",as_index=False)["Attempts"].median()
df_pmedian=df_pmedian.rename(columns = {"Attempts" : "Pmedian" })
df_train=pd.merge(df_pmedian,df_train,on=["ProblemID"])
df_train=add_features_basic(df_train)
df_test = add_features_basic(df_test)
c = []
dic = {}
for index, rows in df_train.iterrows():
_id = rows['ProblemID']
if(_id in dic.keys()):
c.append(dic[_id])
else:
d = df_train[df_train['ProblemID']==_id]
f = len(d[d['is_semantic_error']==True].index)
t = len(d.index)
dic[_id] = (f*1.0)/t
c.append((f*1.0)/t)
df_train['pProblemSemanticError'] = c
df_prob_synt=df_train.groupby("ProblemID",as_index=False)["is_syntax_error"].mean()
df_prob_synt=df_prob_synt.rename(columns={"is_syntax_error" : "Prob_synt"})
    df_train = pd.merge(df_prob_synt, df_train, on=["ProblemID"])  # pandas.merge
import sys
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy as sp
import pylab
from matplotlib import colors, colorbar
from scipy import cluster
#import rpy2
#import rpy2.robjects as robjects
#from rpy2.robjects.packages import importr
from tqdm import tqdm
#from rpy2.robjects import r, numpy2ri
import time
import yaml
import networkx as nx
import argparse
sys.setrecursionlimit(10000)
from . import lineageGroup_utils as lg_utils
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
# NOTE: NEED PANDAS >= 0.22.0
def create_output_dir(outputdir = None):
"""
A simple function to create an output directory to store important logging information,
as well as important figures for qc
"""
if outputdir is None:
i = 1
outputdir = "output" + str(i)
        while os.path.exists(outputdir):
i += 1
outputdir = "output" + str(i)
if not os.path.exists(outputdir):
os.makedirs(outputdir)
with open(outputdir + "/lglog.txt", "w") as f:
f.write("LINEAGE GROUP OUTPUT LOG:\n")
return outputdir
def findTopLG(PIVOT_in, iteration, outputdir, min_intbc_prop = 0.2, kinship_thresh=0.2):
# calculate sum of observed intBCs, identify top intBC
intBC_sums = PIVOT_in.sum(0).sort_values(ascending=False)
ordered_intBCs = intBC_sums.index.tolist()
intBC_top = intBC_sums.index[0]
# take subset of PIVOT table that contain cells that have the top intBC
subPIVOT_in = PIVOT_in[PIVOT_in[intBC_top]>0]
subPIVOT_in_sums = subPIVOT_in.sum(0)
ordered_intBCs2 = subPIVOT_in_sums.sort_values(ascending=False).index.tolist()
subPIVOT_in = subPIVOT_in[ordered_intBCs2]
# binarize
subPIVOT_in[subPIVOT_in>0]=1
# Define intBC set
subPIVOT_in_sums2 = subPIVOT_in.sum(0)
total = subPIVOT_in_sums2[intBC_top]
intBC_sums_filt = subPIVOT_in_sums2[subPIVOT_in_sums2>=min_intbc_prop*total]
# Reduce PIV to only intBCs considered in set
intBC_set = intBC_sums_filt.index.tolist()
PIV_set = PIVOT_in.iloc[:,PIVOT_in.columns.isin(intBC_set)]
# Calculate fraction of UMIs within intBC_set ("kinship") for each cell in PIV_set
f_inset = PIV_set.sum(axis=1)
# define set of cells with good kinship
f_inset_filt = f_inset[f_inset>=kinship_thresh]
LG_cells = f_inset_filt.index.tolist()
# Return updated PIV with LG_cells removed
PIV_noLG = PIVOT_in.iloc[~PIVOT_in.index.isin(LG_cells),:]
# Return PIV with LG_cells assigned
PIV_LG = PIVOT_in.iloc[PIVOT_in.index.isin(LG_cells),:]
PIV_LG["lineageGrp"]= iteration+1
with open(outputdir + "/lglog.txt", "a") as f:
# print statements
f.write("LG"+str(iteration+1)+" Assignment: " + str(PIV_LG.shape[0]) + " cells assigned\n")
# Plot distribution of kinship scores
h4 = plt.figure(figsize=(15,10))
ax4 = plt.hist(f_inset, bins=49, alpha=0.5, histtype='step')
yax4 = plt.yscale('log', basey=10)
plt.savefig(outputdir + "/kinship_scores.png")
return PIV_LG, PIV_noLG, intBC_set
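# Editor's note: findTopLG returns (PIV_LG, PIV_noLG, intBC_set) -- the cells assigned
# to the new lineage group, the remaining unassigned cells, and the intBC set that
# defines the group; iterative_lg_assign below peels groups off until the newest
# group falls under min_clust_size.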
def iterative_lg_assign(pivot_in, min_clust_size, outputdir, min_intbc_thresh=0.2, kinship_thresh=0.2):
## Run LG Assign function
# initiate output variables
PIV_assigned = pd.DataFrame()
master_intBC_list = []
# Loop for iteratively assigning LGs
prev_clust_size = np.inf
i = 0
while prev_clust_size > min_clust_size:
# run function
PIV_outs = findTopLG(pivot_in, i, outputdir, min_intbc_prop=min_intbc_thresh, kinship_thresh=kinship_thresh)
# parse returned objects
PIV_LG = PIV_outs[0]
PIV_noLG = PIV_outs[1]
intBC_set_i = PIV_outs[2]
# append returned objects to output variables
PIV_assigned = PIV_assigned.append(PIV_LG)
master_intBC_list.append(intBC_set_i)
# update PIVOT-in
pivot_in = PIV_noLG
prev_clust_size = PIV_LG.shape[0]
i += 1
return PIV_assigned, master_intBC_list
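# Editor's usage sketch (argument values hypothetical):
#   PIV_assigned, master_intBC_list = iterative_lg_assign(piv, min_clust_size=50,
#                                                         outputdir=outputdir)
# where `piv` is a cell (rows) x intBC (columns) pivot table.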
def get_lg_group(df, piv, curr_LG):
lg_group = df[df["lineageGrp"] == curr_LG]
cells = np.unique(lg_group["cellBC"])
lg_pivot = piv.loc[cells]
props = lg_pivot.apply(lambda x: pylab.sum(x) / len(x)).to_frame().reset_index()
props.columns = ["iBC", "prop"]
props = props.sort_values(by="prop", ascending=False)
props.index = props["iBC"]
return lg_group, props
def rand_cmap(nlabels, type='bright', first_color_black=True, last_color_black=False, verbose=True):
"""
Creates a random colormap to be used together with matplotlib. Useful for segmentation tasks
:param nlabels: Number of labels (size of colormap)
:param type: 'bright' for strong colors, 'soft' for pastel colors
:param first_color_black: Option to use first color as black, True or False
:param last_color_black: Option to use last color as black, True or False
:param verbose: Prints the number of labels and shows the colormap. True or False
:return: colormap for matplotlib
"""
from matplotlib.colors import LinearSegmentedColormap
import colorsys
if type not in ('bright', 'soft'):
print ('Please choose "bright" or "soft" for type')
return
if verbose:
print('Number of labels: ' + str(nlabels))
# Generate color map for bright colors, based on hsv
if type == 'bright':
randHSVcolors = [(np.random.uniform(low=0.0, high=1),
np.random.uniform(low=0.2, high=1),
np.random.uniform(low=0.9, high=1)) for i in range(nlabels)]
# Convert HSV list to RGB
randRGBcolors = []
for HSVcolor in randHSVcolors:
randRGBcolors.append(colorsys.hsv_to_rgb(HSVcolor[0], HSVcolor[1], HSVcolor[2]))
if first_color_black:
randRGBcolors[0] = [0, 0, 0]
if last_color_black:
randRGBcolors[-1] = [0, 0, 0]
random_colormap = LinearSegmentedColormap.from_list('new_map', randRGBcolors, N=nlabels)
# Generate soft pastel colors, by limiting the RGB spectrum
if type == 'soft':
low = 0.6
high = 0.95
randRGBcolors = [(np.random.uniform(low=low, high=high),
np.random.uniform(low=low, high=high),
np.random.uniform(low=low, high=high)) for i in range(nlabels)]
if first_color_black:
randRGBcolors[0] = [0, 0, 0]
if last_color_black:
randRGBcolors[-1] = [0, 0, 0]
random_colormap = LinearSegmentedColormap.from_list('new_map', randRGBcolors, N=nlabels)
# Display colorbar
if verbose:
from matplotlib import colors, colorbar
from matplotlib import pyplot as plt
fig, ax = plt.subplots(1, 1, figsize=(15, 0.5))
bounds = np.linspace(0, nlabels, nlabels + 1)
norm = colors.BoundaryNorm(bounds, nlabels)
cb = colorbar.ColorbarBase(ax, cmap=random_colormap, norm=norm, spacing='proportional', ticks=None,
boundaries=bounds, format='%1i', orientation=u'horizontal')
return random_colormap
def assign_lineage_groups(dfMT, max_kinship_LG, master_intBCs):
"""
Assign cells in the allele table to a lineage group
:param alleletable: allele table
:param ind1: clusterings
:param df_pivot_I: binary pivot table relating cell BC to integration BC
:return: allele table with lineage group assignments
"""
dfMT["lineageGrp"]=0
cellBC2LG = {}
for n in max_kinship_LG.index:
cellBC2LG[n] = max_kinship_LG.loc[n, "lineageGrp"]
dfMT["lineageGrp"] = dfMT["cellBC"].map(cellBC2LG)
dfMT["lineageGrp"] = dfMT["lineageGrp"].fillna(value=0)
lg_sizes = {}
rename_lg = {}
for n, g in dfMT.groupby(["lineageGrp"]):
if n != 0:
lg_sizes[n] = len(g["cellBC"].unique())
sorted_by_value = sorted(lg_sizes.items(), key = lambda kv: kv[1])[::-1]
for i, tup in zip(range(1, len(sorted_by_value)+1), sorted_by_value):
print(i, tup[0], float(i))
rename_lg[tup[0]] = float(i)
rename_lg[0] = 0.0
dfMT["lineageGrp"] = dfMT.apply(lambda x: rename_lg[x.lineageGrp], axis=1)
return dfMT
def plot_overlap_heatmap(at_pivot_I, at, outputdir):
# remove old plots
plt.close()
flat_master = []
for n, lg in at.groupby("lineageGrp"):
for item in lg["intBC"].unique():
flat_master.append(item)
at_pivot_I = at_pivot_I[flat_master]
h2 = plt.figure(figsize=(20,20))
axmat2 = h2.add_axes([0.3,0.1,0.6,0.8])
im2 = axmat2.matshow(at_pivot_I, aspect='auto', origin='upper')
plt.savefig(outputdir + "/clustered_intbc.png")
plt.close()
def add_cutsite_encoding(lg_group):
lg_group["s1"] = 0
lg_group["s2"] = 0
lg_group["s3"] = 0
for i in lg_group.index:
if lg_group.loc[i, "r1"] == "['None']":
lg_group.loc[i, "s1"] = .9
elif "D" in lg_group.loc[i, "r1"]:
lg_group.loc[i, "s1"] = 1.9
elif 'I' in lg_group.loc[i, "r1"]:
lg_group.loc[i, 's1'] = 2.9
if lg_group.loc[i, "r2"] == "['None']":
lg_group.loc[i, "s2"] = .9
elif "D" in lg_group.loc[i, "r2"]:
lg_group.loc[i, "s2"] = 1.9
elif 'I' in lg_group.loc[i, "r2"]:
lg_group.loc[i, 's2'] = 2.9
if lg_group.loc[i, "r3"] == "['None']":
lg_group.loc[i, "s3"] = .9
elif "D" in lg_group.loc[i, "r3"]:
lg_group.loc[i, "s3"] = 1.9
elif 'I' in lg_group.loc[i, "r3"]:
lg_group.loc[i, 's3'] = 2.9
return lg_group
def plot_overlap_heatmap_lg(at, at_pivot_I, outputdir):
if not os.path.exists(outputdir + "/lineageGrp_piv_heatmaps"):
os.makedirs(outputdir + "/lineageGrp_piv_heatmaps")
for n, lg_group in tqdm(at.groupby("lineageGrp")):
plt.close()
lg_group = add_cutsite_encoding(lg_group)
s_cmap = colors.ListedColormap(['grey', 'red', 'blue'], N=3)
lg_group_pivot = pd.pivot_table(lg_group, index=["cellBC"], columns=["intBC"], values=['s1', 's2', 's3'], aggfunc=pylab.mean).T
lg_group_pivot2 = pd.pivot_table(lg_group,index=['cellBC'],columns=['intBC'],values='UMI',aggfunc=pylab.size)
cell_umi_count = lg_group.groupby(["cellBC"]).agg({"UMI": "count"}).sort_values(by="UMI")
n_unique_alleles = lg_group.groupby(["intBC"]).agg({"r1": "nunique", "r2": "nunique", "r3": "nunique"})
cellBCList = lg_group["cellBC"].unique()
col_order = lg_group_pivot2.dropna(axis=1, how="all").sum().sort_values(ascending=False,inplace=False).index
if len(col_order) < 2:
continue
s3 = lg_group_pivot.unstack(level=0).T
s3 = s3[col_order]
s3 = s3.T.stack(level=1).T
s3 = s3.loc[cell_umi_count.index]
s3_2 = lg_group_pivot2.dropna(axis=1, how="all").sum().sort_values(ascending=False, inplace=False)[col_order]
n_unique_alleles = n_unique_alleles.loc[col_order]
s3_intBCs = col_order
s3_cellBCs = s3.index.tolist()
# Plot heatmap
h = plt.figure(figsize=(14,10))
ax = h.add_axes([0.3, 0.1, 0.6, 0.8],frame_on=True)
im = ax.matshow(s3, aspect='auto', origin ="lower", cmap=s_cmap)
axx1 = plt.xticks(range(1, len(col_order)*3, 3), col_order, rotation='vertical', family="monospace")
ax3 = h.add_axes([0.2, 0.1, 0.1, 0.8], frame_on=True)
plt.barh(range(s3.shape[0]), cell_umi_count["UMI"])
plt.ylim([0, s3.shape[0]])
ax3.autoscale(tight=True)
axy0 = ax3.set_yticks(range(len(s3_cellBCs)))
axy1 = ax3.set_yticklabels(s3_cellBCs, family='monospace')
w = (1/3)
x = np.arange(len(s3_intBCs))
ax2 = h.add_axes([0.3, 0, 0.6, 0.1], frame_on = False)
b1 = ax2.bar(x - w, n_unique_alleles["r1"], width = w, label="r1")
b2 = ax2.bar(x, n_unique_alleles["r2"], width = w, label="r2")
b3 = ax2.bar(x + w, n_unique_alleles["r3"], width = w, label='r3')
ax2.set_xlim([0, len(s3_intBCs)])
ax2.set_ylim(ymin=0, ymax=(max(n_unique_alleles["r1"].max(), n_unique_alleles["r2"].max(), n_unique_alleles["r3"].max()) + 10))
ax2.set_xticks([])
ax2.yaxis.tick_right()
ax2.invert_yaxis()
ax2.autoscale(tight=True)
plt.legend()
#plt.gcf().subplots_adjust(bottom=0.15)
plt.tight_layout()
plt.savefig(outputdir + "/lineageGrp_piv_heatmaps/lg_" + str(int(n)) + "_piv_heatmap.png")
plt.close()
def collectAlleles(at, thresh = 0.05):
lineageGrps = at["lineageGrp"].unique()
    at_piv = pd.pivot_table(at, index="cellBC", columns="intBC", values="UMI", aggfunc="count")  # pandas.pivot_table
import inspect
import os
import datetime
from collections import OrderedDict
import numpy as np
from numpy import nan, array
import pandas as pd
import pytest
from pandas.util.testing import assert_series_equal, assert_frame_equal
from numpy.testing import assert_allclose
from pvlib import tmy
from pvlib import pvsystem
from pvlib import clearsky
from pvlib import irradiance
from pvlib import atmosphere
from pvlib import solarposition
from pvlib.location import Location
from conftest import needs_numpy_1_10, requires_scipy
latitude = 32.2
longitude = -111
tus = Location(latitude, longitude, 'US/Arizona', 700, 'Tucson')
times = pd.date_range(start=datetime.datetime(2014,1,1),
end=datetime.datetime(2014,1,2), freq='1Min')
ephem_data = solarposition.get_solarposition(times,
latitude=latitude,
longitude=longitude,
method='nrel_numpy')
am = atmosphere.relativeairmass(ephem_data.apparent_zenith)
irrad_data = clearsky.ineichen(ephem_data['apparent_zenith'], am,
linke_turbidity=3)
aoi = irradiance.aoi(0, 0, ephem_data['apparent_zenith'],
ephem_data['azimuth'])
meta = {'latitude': 37.8,
'longitude': -122.3,
'altitude': 10,
'Name': 'Oakland',
'State': 'CA',
'TZ': -8}
pvlib_abspath = os.path.dirname(os.path.abspath(inspect.getfile(tmy)))
tmy3_testfile = os.path.join(pvlib_abspath, 'data', '703165TY.csv')
tmy2_testfile = os.path.join(pvlib_abspath, 'data', '12839.tm2')
tmy3_data, tmy3_metadata = tmy.readtmy3(tmy3_testfile)
tmy2_data, tmy2_metadata = tmy.readtmy2(tmy2_testfile)
def test_systemdef_tmy3():
expected = {'tz': -9.0,
'albedo': 0.1,
'altitude': 7.0,
'latitude': 55.317,
'longitude': -160.517,
'name': '"SAND POINT"',
'strings_per_inverter': 5,
'modules_per_string': 5,
'surface_azimuth': 0,
'surface_tilt': 0}
assert expected == pvsystem.systemdef(tmy3_metadata, 0, 0, .1, 5, 5)
def test_systemdef_tmy2():
expected = {'tz': -5,
'albedo': 0.1,
'altitude': 2.0,
'latitude': 25.8,
'longitude': -80.26666666666667,
'name': 'MIAMI',
'strings_per_inverter': 5,
'modules_per_string': 5,
'surface_azimuth': 0,
'surface_tilt': 0}
assert expected == pvsystem.systemdef(tmy2_metadata, 0, 0, .1, 5, 5)
def test_systemdef_dict():
expected = {'tz': -8, ## Note that TZ is float, but Location sets tz as string
'albedo': 0.1,
'altitude': 10,
'latitude': 37.8,
'longitude': -122.3,
'name': 'Oakland',
'strings_per_inverter': 5,
'modules_per_string': 5,
'surface_azimuth': 0,
'surface_tilt': 5}
assert expected == pvsystem.systemdef(meta, 5, 0, .1, 5, 5)
@needs_numpy_1_10
def test_ashraeiam():
thetas = np.linspace(-90, 90, 9)
iam = pvsystem.ashraeiam(thetas, .05)
expected = np.array([ nan, 0.9193437 , 0.97928932, 0.99588039, 1. ,
0.99588039, 0.97928932, 0.9193437 , nan])
assert_allclose(iam, expected, equal_nan=True)
@needs_numpy_1_10
def test_PVSystem_ashraeiam():
module_parameters = pd.Series({'b': 0.05})
system = pvsystem.PVSystem(module_parameters=module_parameters)
thetas = np.linspace(-90, 90, 9)
iam = system.ashraeiam(thetas)
expected = np.array([ nan, 0.9193437 , 0.97928932, 0.99588039, 1. ,
0.99588039, 0.97928932, 0.9193437 , nan])
assert_allclose(iam, expected, equal_nan=True)
@needs_numpy_1_10
def test_physicaliam():
thetas = np.linspace(-90, 90, 9)
iam = pvsystem.physicaliam(thetas, 1.526, 0.002, 4)
expected = np.array([ nan, 0.8893998 , 0.98797788, 0.99926198, nan,
0.99926198, 0.98797788, 0.8893998 , nan])
assert_allclose(iam, expected, equal_nan=True)
@needs_numpy_1_10
def test_PVSystem_physicaliam():
module_parameters = pd.Series({'K': 4, 'L': 0.002, 'n': 1.526})
system = pvsystem.PVSystem(module_parameters=module_parameters)
thetas = np.linspace(-90, 90, 9)
iam = system.physicaliam(thetas)
expected = np.array([ nan, 0.8893998 , 0.98797788, 0.99926198, nan,
0.99926198, 0.98797788, 0.8893998 , nan])
assert_allclose(iam, expected, equal_nan=True)
# if this completes successfully we'll be able to do more tests below.
@pytest.fixture(scope="session")
def sam_data():
data = {}
data['cecmod'] = pvsystem.retrieve_sam('cecmod')
data['sandiamod'] = pvsystem.retrieve_sam('sandiamod')
data['cecinverter'] = pvsystem.retrieve_sam('cecinverter')
return data
@pytest.fixture(scope="session")
def sapm_module_params(sam_data):
modules = sam_data['sandiamod']
module = 'Canadian_Solar_CS5P_220M___2009_'
module_parameters = modules[module]
return module_parameters
@pytest.fixture(scope="session")
def cec_module_params(sam_data):
modules = sam_data['cecmod']
module = 'Example_Module'
module_parameters = modules[module]
return module_parameters
def test_sapm(sapm_module_params):
times = pd.DatetimeIndex(start='2015-01-01', periods=5, freq='12H')
effective_irradiance = pd.Series([-1, 0.5, 1.1, np.nan, 1], index=times)
temp_cell = pd.Series([10, 25, 50, 25, np.nan], index=times)
out = pvsystem.sapm(effective_irradiance, temp_cell, sapm_module_params)
expected = pd.DataFrame(np.array(
[[ -5.0608322 , -4.65037767, nan, nan,
nan, -4.91119927, -4.15367716],
[ 2.545575 , 2.28773882, 56.86182059, 47.21121608,
108.00693168, 2.48357383, 1.71782772],
[ 5.65584763, 5.01709903, 54.1943277 , 42.51861718,
213.32011294, 5.52987899, 3.48660728],
[ nan, nan, nan, nan,
nan, nan, nan],
[ nan, nan, nan, nan,
nan, nan, nan]]),
columns=['i_sc', 'i_mp', 'v_oc', 'v_mp', 'p_mp', 'i_x', 'i_xx'],
index=times)
assert_frame_equal(out, expected, check_less_precise=4)
out = pvsystem.sapm(1, 25, sapm_module_params)
expected = OrderedDict()
expected['i_sc'] = 5.09115
expected['i_mp'] = 4.5462909092579995
expected['v_oc'] = 59.260800000000003
expected['v_mp'] = 48.315600000000003
expected['p_mp'] = 219.65677305534581
expected['i_x'] = 4.9759899999999995
expected['i_xx'] = 3.1880204359100004
for k, v in expected.items():
assert_allclose(out[k], v, atol=1e-4)
# just make sure it works with a dict input
pvsystem.sapm(effective_irradiance, temp_cell,
sapm_module_params.to_dict())
def test_PVSystem_sapm(sapm_module_params):
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
times = pd.DatetimeIndex(start='2015-01-01', periods=5, freq='12H')
effective_irradiance = pd.Series([-1, 0.5, 1.1, np.nan, 1], index=times)
temp_cell = pd.Series([10, 25, 50, 25, np.nan], index=times)
out = system.sapm(effective_irradiance, temp_cell)
@pytest.mark.parametrize('airmass,expected', [
(1.5, 1.00028714375),
(np.array([[10, np.nan]]), np.array([[0.999535, 0]])),
(pd.Series([5]), pd.Series([1.0387675]))
])
def test_sapm_spectral_loss(sapm_module_params, airmass, expected):
out = pvsystem.sapm_spectral_loss(airmass, sapm_module_params)
if isinstance(airmass, pd.Series):
assert_series_equal(out, expected, check_less_precise=4)
else:
assert_allclose(out, expected, atol=1e-4)
def test_PVSystem_sapm_spectral_loss(sapm_module_params):
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
times = pd.DatetimeIndex(start='2015-01-01', periods=2, freq='12H')
airmass = pd.Series([1, 10], index=times)
out = system.sapm_spectral_loss(airmass)
@pytest.mark.parametrize('aoi,expected', [
(45, 0.9975036250000002),
(np.array([[-30, 30, 100, np.nan]]),
np.array([[np.nan, 1.007572, 0, np.nan]])),
(pd.Series([80]), pd.Series([0.597472]))
])
def test_sapm_aoi_loss(sapm_module_params, aoi, expected):
out = pvsystem.sapm_aoi_loss(aoi, sapm_module_params)
if isinstance(aoi, pd.Series):
assert_series_equal(out, expected, check_less_precise=4)
else:
assert_allclose(out, expected, atol=1e-4)
def test_sapm_aoi_loss_limits():
module_parameters = {'B0': 5, 'B1': 0, 'B2': 0, 'B3': 0, 'B4': 0, 'B5': 0}
assert pvsystem.sapm_aoi_loss(1, module_parameters) == 5
module_parameters = {'B0': 5, 'B1': 0, 'B2': 0, 'B3': 0, 'B4': 0, 'B5': 0}
assert pvsystem.sapm_aoi_loss(1, module_parameters, upper=1) == 1
module_parameters = {'B0': -5, 'B1': 0, 'B2': 0, 'B3': 0, 'B4': 0, 'B5': 0}
assert pvsystem.sapm_aoi_loss(1, module_parameters) == 0
def test_PVSystem_sapm_aoi_loss(sapm_module_params):
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
times = pd.DatetimeIndex(start='2015-01-01', periods=2, freq='12H')
aoi = pd.Series([45, 10], index=times)
out = system.sapm_aoi_loss(aoi)
@pytest.mark.parametrize('test_input,expected', [
([1000, 100, 5, 45, 1000], 1.1400510967821877),
([np.array([np.nan, 1000, 1000]),
np.array([100, np.nan, 100]),
np.array([1.1, 1.1, 1.1]),
np.array([10, 10, 10]),
1000],
np.array([np.nan, np.nan, 1.081157])),
([pd.Series([1000]), pd.Series([100]), pd.Series([1.1]),
| pd.Series([10]) | pandas.Series |
import datetime
from collections import OrderedDict
import warnings
import numpy as np
from numpy import array, nan
import pandas as pd
import pytest
from numpy.testing import assert_almost_equal, assert_allclose
from conftest import assert_frame_equal, assert_series_equal
from pvlib import irradiance
from conftest import requires_ephem, requires_numba
# fixtures create realistic test input data
# test input data generated at Location(32.2, -111, 'US/Arizona', 700)
# test input data is hard coded to avoid dependencies on other parts of pvlib
@pytest.fixture
def times():
# must include night values
return pd.date_range(start='20140624', freq='6H', periods=4,
tz='US/Arizona')
@pytest.fixture
def irrad_data(times):
return pd.DataFrame(np.array(
[[ 0. , 0. , 0. ],
[ 79.73860422, 316.1949056 , 40.46149818],
[1042.48031487, 939.95469881, 118.45831879],
[ 257.20751138, 646.22886049, 62.03376265]]),
columns=['ghi', 'dni', 'dhi'], index=times)
@pytest.fixture
def ephem_data(times):
return pd.DataFrame(np.array(
[[124.0390863 , 124.0390863 , -34.0390863 , -34.0390863 ,
352.69550699, -2.36677158],
[ 82.85457044, 82.97705621, 7.14542956, 7.02294379,
66.71410338, -2.42072165],
[ 10.56413562, 10.56725766, 79.43586438, 79.43274234,
144.76567754, -2.47457321],
[ 72.41687122, 72.46903556, 17.58312878, 17.53096444,
287.04104128, -2.52831909]]),
columns=['apparent_zenith', 'zenith', 'apparent_elevation',
'elevation', 'azimuth', 'equation_of_time'],
index=times)
@pytest.fixture
def dni_et(times):
return np.array(
[1321.1655834833093, 1321.1655834833093, 1321.1655834833093,
1321.1655834833093])
@pytest.fixture
def relative_airmass(times):
return pd.Series([np.nan, 7.58831596, 1.01688136, 3.27930443], times)
# setup for et rad test. put it here for readability
timestamp = pd.Timestamp('20161026')
dt_index = pd.DatetimeIndex([timestamp])
doy = timestamp.dayofyear
dt_date = timestamp.date()
dt_datetime = datetime.datetime.combine(dt_date, datetime.time(0))
dt_np64 = np.datetime64(dt_datetime)
value = 1383.636203
@pytest.mark.parametrize('testval, expected', [
(doy, value),
(np.float64(doy), value),
(dt_date, value),
(dt_datetime, value),
(dt_np64, value),
(np.array([doy]), np.array([value])),
(pd.Series([doy]), np.array([value])),
(dt_index, pd.Series([value], index=dt_index)),
(timestamp, value)
])
@pytest.mark.parametrize('method', [
'asce', 'spencer', 'nrel', pytest.param('pyephem', marks=requires_ephem)])
def test_get_extra_radiation(testval, expected, method):
out = irradiance.get_extra_radiation(testval, method=method)
assert_allclose(out, expected, atol=10)
def test_get_extra_radiation_epoch_year():
out = irradiance.get_extra_radiation(doy, method='nrel', epoch_year=2012)
assert_allclose(out, 1382.4926804890767, atol=0.1)
@requires_numba
def test_get_extra_radiation_nrel_numba(times):
with warnings.catch_warnings():
# don't warn on method reload or num threads
warnings.simplefilter("ignore")
result = irradiance.get_extra_radiation(
times, method='nrel', how='numba', numthreads=4)
# and reset to no-numba state
irradiance.get_extra_radiation(times, method='nrel')
assert_allclose(result,
[1322.332316, 1322.296282, 1322.261205, 1322.227091])
def test_get_extra_radiation_invalid():
with pytest.raises(ValueError):
irradiance.get_extra_radiation(300, method='invalid')
def test_grounddiffuse_simple_float():
result = irradiance.get_ground_diffuse(40, 900)
assert_allclose(result, 26.32000014911496)
def test_grounddiffuse_simple_series(irrad_data):
ground_irrad = irradiance.get_ground_diffuse(40, irrad_data['ghi'])
assert ground_irrad.name == 'diffuse_ground'
def test_grounddiffuse_albedo_0(irrad_data):
ground_irrad = irradiance.get_ground_diffuse(
40, irrad_data['ghi'], albedo=0)
assert 0 == ground_irrad.all()
def test_grounddiffuse_albedo_invalid_surface(irrad_data):
with pytest.raises(KeyError):
irradiance.get_ground_diffuse(
40, irrad_data['ghi'], surface_type='invalid')
def test_grounddiffuse_albedo_surface(irrad_data):
result = irradiance.get_ground_diffuse(40, irrad_data['ghi'],
surface_type='sand')
assert_allclose(result, [0, 3.731058, 48.778813, 12.035025], atol=1e-4)
def test_isotropic_float():
result = irradiance.isotropic(40, 100)
assert_allclose(result, 88.30222215594891)
def test_isotropic_series(irrad_data):
result = irradiance.isotropic(40, irrad_data['dhi'])
assert_allclose(result, [0, 35.728402, 104.601328, 54.777191], atol=1e-4)
def test_klucher_series_float():
# klucher inputs
surface_tilt, surface_azimuth = 40.0, 180.0
dhi, ghi = 100.0, 900.0
solar_zenith, solar_azimuth = 20.0, 180.0
# expect same result for floats and pd.Series
expected = irradiance.klucher(
surface_tilt, surface_azimuth,
pd.Series(dhi), pd.Series(ghi),
pd.Series(solar_zenith), pd.Series(solar_azimuth)
) # 94.99429931664851
result = irradiance.klucher(
surface_tilt, surface_azimuth, dhi, ghi, solar_zenith, solar_azimuth
)
assert_allclose(result, expected[0])
def test_klucher_series(irrad_data, ephem_data):
result = irradiance.klucher(40, 180, irrad_data['dhi'], irrad_data['ghi'],
ephem_data['apparent_zenith'],
ephem_data['azimuth'])
# pvlib matlab 1.4 does not contain the max(cos_tt, 0) correction
# so, these values are different
assert_allclose(result, [0., 36.789794, 109.209347, 56.965916], atol=1e-4)
# expect same result for np.array and pd.Series
expected = irradiance.klucher(
40, 180, irrad_data['dhi'].values, irrad_data['ghi'].values,
ephem_data['apparent_zenith'].values, ephem_data['azimuth'].values
)
assert_allclose(result, expected, atol=1e-4)
def test_haydavies(irrad_data, ephem_data, dni_et):
result = irradiance.haydavies(
40, 180, irrad_data['dhi'], irrad_data['dni'], dni_et,
ephem_data['apparent_zenith'], ephem_data['azimuth'])
# values from matlab 1.4 code
assert_allclose(result, [0, 27.1775, 102.9949, 33.1909], atol=1e-4)
def test_reindl(irrad_data, ephem_data, dni_et):
result = irradiance.reindl(
40, 180, irrad_data['dhi'], irrad_data['dni'], irrad_data['ghi'],
dni_et, ephem_data['apparent_zenith'], ephem_data['azimuth'])
# values from matlab 1.4 code
assert_allclose(result, [np.nan, 27.9412, 104.1317, 34.1663], atol=1e-4)
def test_king(irrad_data, ephem_data):
result = irradiance.king(40, irrad_data['dhi'], irrad_data['ghi'],
ephem_data['apparent_zenith'])
assert_allclose(result, [0, 44.629352, 115.182626, 79.719855], atol=1e-4)
def test_perez(irrad_data, ephem_data, dni_et, relative_airmass):
dni = irrad_data['dni'].copy()
dni.iloc[2] = np.nan
out = irradiance.perez(40, 180, irrad_data['dhi'], dni,
dni_et, ephem_data['apparent_zenith'],
ephem_data['azimuth'], relative_airmass)
expected = pd.Series(np.array(
[ 0. , 31.46046871, np.nan, 45.45539877]),
index=irrad_data.index)
assert_series_equal(out, expected, check_less_precise=2)
def test_perez_components(irrad_data, ephem_data, dni_et, relative_airmass):
dni = irrad_data['dni'].copy()
dni.iloc[2] = np.nan
out = irradiance.perez(40, 180, irrad_data['dhi'], dni,
dni_et, ephem_data['apparent_zenith'],
ephem_data['azimuth'], relative_airmass,
return_components=True)
expected = pd.DataFrame(np.array(
[[ 0. , 31.46046871, np.nan, 45.45539877],
[ 0. , 26.84138589, np.nan, 31.72696071],
[ 0. , 0. , np.nan, 4.47966439],
[ 0. , 4.62212181, np.nan, 9.25316454]]).T,
columns=['sky_diffuse', 'isotropic', 'circumsolar', 'horizon'],
index=irrad_data.index
)
expected_for_sum = expected['sky_diffuse'].copy()
expected_for_sum.iloc[2] = 0
sum_components = out.iloc[:, 1:].sum(axis=1)
sum_components.name = 'sky_diffuse'
assert_frame_equal(out, expected, check_less_precise=2)
assert_series_equal(sum_components, expected_for_sum, check_less_precise=2)
def test_perez_arrays(irrad_data, ephem_data, dni_et, relative_airmass):
dni = irrad_data['dni'].copy()
dni.iloc[2] = np.nan
out = irradiance.perez(40, 180, irrad_data['dhi'].values, dni.values,
dni_et, ephem_data['apparent_zenith'].values,
ephem_data['azimuth'].values,
relative_airmass.values)
expected = np.array(
[ 0. , 31.46046871, np.nan, 45.45539877])
assert_allclose(out, expected, atol=1e-2)
assert isinstance(out, np.ndarray)
def test_perez_scalar():
# copied values from fixtures
out = irradiance.perez(40, 180, 118.45831879, 939.95469881,
1321.1655834833093, 10.56413562, 144.76567754,
1.01688136)
# this will fail. out is ndarry with ndim == 0. fix in future version.
# assert np.isscalar(out)
assert_allclose(out, 109.084332)
@pytest.mark.parametrize('model', ['isotropic', 'klucher', 'haydavies',
'reindl', 'king', 'perez'])
def test_sky_diffuse_zenith_close_to_90(model):
# GH 432
sky_diffuse = irradiance.get_sky_diffuse(
30, 180, 89.999, 230,
dni=10, ghi=51, dhi=50, dni_extra=1360, airmass=12, model=model)
assert sky_diffuse < 100
def test_get_sky_diffuse_invalid():
with pytest.raises(ValueError):
irradiance.get_sky_diffuse(
30, 180, 0, 180, 1000, 1100, 100, dni_extra=1360, airmass=1,
model='invalid')
def test_liujordan():
expected = pd.DataFrame(np.array(
[[863.859736967, 653.123094076, 220.65905025]]),
columns=['ghi', 'dni', 'dhi'],
index=[0])
out = irradiance.liujordan(
pd.Series([10]), pd.Series([0.5]), pd.Series([1.1]), dni_extra=1400)
assert_frame_equal(out, expected)
def test_get_total_irradiance(irrad_data, ephem_data, dni_et, relative_airmass):
models = ['isotropic', 'klucher',
'haydavies', 'reindl', 'king', 'perez']
for model in models:
total = irradiance.get_total_irradiance(
32, 180,
ephem_data['apparent_zenith'], ephem_data['azimuth'],
dni=irrad_data['dni'], ghi=irrad_data['ghi'],
dhi=irrad_data['dhi'],
dni_extra=dni_et, airmass=relative_airmass,
model=model,
surface_type='urban')
assert total.columns.tolist() == ['poa_global', 'poa_direct',
'poa_diffuse', 'poa_sky_diffuse',
'poa_ground_diffuse']
@pytest.mark.parametrize('model', ['isotropic', 'klucher',
'haydavies', 'reindl', 'king', 'perez'])
def test_get_total_irradiance_scalars(model):
total = irradiance.get_total_irradiance(
32, 180,
10, 180,
dni=1000, ghi=1100,
dhi=100,
dni_extra=1400, airmass=1,
model=model,
surface_type='urban')
assert list(total.keys()) == ['poa_global', 'poa_direct',
'poa_diffuse', 'poa_sky_diffuse',
'poa_ground_diffuse']
# test that none of the values are nan
assert np.isnan(np.array(list(total.values()))).sum() == 0
def test_poa_components(irrad_data, ephem_data, dni_et, relative_airmass):
aoi = irradiance.aoi(40, 180, ephem_data['apparent_zenith'],
ephem_data['azimuth'])
gr_sand = irradiance.get_ground_diffuse(40, irrad_data['ghi'],
surface_type='sand')
diff_perez = irradiance.perez(
40, 180, irrad_data['dhi'], irrad_data['dni'], dni_et,
ephem_data['apparent_zenith'], ephem_data['azimuth'], relative_airmass)
out = irradiance.poa_components(
aoi, irrad_data['dni'], diff_perez, gr_sand)
expected = pd.DataFrame(np.array(
[[ 0. , -0. , 0. , 0. ,
0. ],
[ 35.19456561, 0. , 35.19456561, 31.4635077 ,
3.73105791],
[956.18253696, 798.31939281, 157.86314414, 109.08433162,
48.77881252],
[ 90.99624896, 33.50143401, 57.49481495, 45.45978964,
12.03502531]]),
columns=['poa_global', 'poa_direct', 'poa_diffuse', 'poa_sky_diffuse',
'poa_ground_diffuse'],
index=irrad_data.index)
assert_frame_equal(out, expected)
@pytest.mark.parametrize('pressure,expected', [
(93193, [[830.46567, 0.79742, 0.93505],
[676.09497, 0.63776, 3.02102]]),
(None, [[868.72425, 0.79742, 1.01664],
[680.66679, 0.63776, 3.28463]]),
(101325, [[868.72425, 0.79742, 1.01664],
[680.66679, 0.63776, 3.28463]])
])
def test_disc_value(pressure, expected):
# see GH 449 for pressure=None vs. 101325.
columns = ['dni', 'kt', 'airmass']
times = pd.DatetimeIndex(['2014-06-24T1200', '2014-06-24T1800'],
tz='America/Phoenix')
ghi = pd.Series([1038.62, 254.53], index=times)
zenith = pd.Series([10.567, 72.469], index=times)
out = irradiance.disc(ghi, zenith, times, pressure=pressure)
expected_values = np.array(expected)
expected = pd.DataFrame(expected_values, columns=columns, index=times)
# check the pandas dataframe. check_less_precise is weird
assert_frame_equal(out, expected, check_less_precise=True)
# use np.assert_allclose to check values more clearly
assert_allclose(out.values, expected_values, atol=1e-5)
def test_disc_overirradiance():
columns = ['dni', 'kt', 'airmass']
ghi = np.array([3000])
solar_zenith = np.full_like(ghi, 0)
times = pd.date_range(start='2016-07-19 12:00:00', freq='1s',
periods=len(ghi), tz='America/Phoenix')
out = irradiance.disc(ghi=ghi, solar_zenith=solar_zenith,
datetime_or_doy=times)
expected = pd.DataFrame(np.array(
[[8.72544336e+02, 1.00000000e+00, 9.99493933e-01]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
def test_disc_min_cos_zenith_max_zenith():
# map out behavior under difficult conditions with various
# limiting kwargs settings
columns = ['dni', 'kt', 'airmass']
times = pd.DatetimeIndex(['2016-07-19 06:11:00'], tz='America/Phoenix')
out = irradiance.disc(ghi=1.0, solar_zenith=89.99, datetime_or_doy=times)
expected = pd.DataFrame(np.array(
[[0.00000000e+00, 1.16046346e-02, 12.0]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
# max_zenith and/or max_airmass keep these results reasonable
out = irradiance.disc(ghi=1.0, solar_zenith=89.99, datetime_or_doy=times,
min_cos_zenith=0)
expected = pd.DataFrame(np.array(
[[0.00000000e+00, 1.0, 12.0]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
# still get reasonable values because of max_airmass=12 limit
out = irradiance.disc(ghi=1.0, solar_zenith=89.99, datetime_or_doy=times,
max_zenith=100)
expected = pd.DataFrame(np.array(
[[0., 1.16046346e-02, 12.0]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
# still get reasonable values because of max_airmass=12 limit
out = irradiance.disc(ghi=1.0, solar_zenith=89.99, datetime_or_doy=times,
min_cos_zenith=0, max_zenith=100)
expected = pd.DataFrame(np.array(
[[277.50185968, 1.0, 12.0]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
# max_zenith keeps this result reasonable
out = irradiance.disc(ghi=1.0, solar_zenith=89.99, datetime_or_doy=times,
min_cos_zenith=0, max_airmass=100)
expected = pd.DataFrame(np.array(
[[0.00000000e+00, 1.0, 36.39544757]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
# allow zenith to be close to 90 and airmass to be infinite
# and we get crazy values
out = irradiance.disc(ghi=1.0, solar_zenith=89.99, datetime_or_doy=times,
max_zenith=100, max_airmass=100)
expected = pd.DataFrame(np.array(
[[6.68577449e+03, 1.16046346e-02, 3.63954476e+01]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
# allow min cos zenith to be 0, zenith to be close to 90,
# and airmass to be very big and we get even higher DNI values
out = irradiance.disc(ghi=1.0, solar_zenith=89.99, datetime_or_doy=times,
min_cos_zenith=0, max_zenith=100, max_airmass=100)
expected = pd.DataFrame(np.array(
[[7.21238390e+03, 1., 3.63954476e+01]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
def test_dirint_value():
times = pd.DatetimeIndex(['2014-06-24T12-0700', '2014-06-24T18-0700'])
ghi = pd.Series([1038.62, 254.53], index=times)
zenith = pd.Series([10.567, 72.469], index=times)
pressure = 93193.
dirint_data = irradiance.dirint(ghi, zenith, times, pressure=pressure)
assert_almost_equal(dirint_data.values,
np.array([868.8, 699.7]), 1)
def test_dirint_nans():
times = pd.date_range(start='2014-06-24T12-0700', periods=5, freq='6H')
ghi = pd.Series([np.nan, 1038.62, 1038.62, 1038.62, 1038.62], index=times)
zenith = pd.Series([10.567, np.nan, 10.567, 10.567, 10.567], index=times)
pressure = pd.Series([93193., 93193., np.nan, 93193., 93193.], index=times)
temp_dew = pd.Series([10, 10, 10, np.nan, 10], index=times)
dirint_data = irradiance.dirint(ghi, zenith, times, pressure=pressure,
temp_dew=temp_dew)
assert_almost_equal(dirint_data.values,
np.array([np.nan, np.nan, np.nan, np.nan, 893.1]), 1)
def test_dirint_tdew():
times = pd.DatetimeIndex(['2014-06-24T12-0700', '2014-06-24T18-0700'])
ghi = pd.Series([1038.62, 254.53], index=times)
zenith = pd.Series([10.567, 72.469], index=times)
pressure = 93193.
dirint_data = irradiance.dirint(ghi, zenith, times, pressure=pressure,
temp_dew=10)
assert_almost_equal(dirint_data.values,
np.array([882.1, 672.6]), 1)
def test_dirint_no_delta_kt():
times = pd.DatetimeIndex(['2014-06-24T12-0700', '2014-06-24T18-0700'])
ghi = pd.Series([1038.62, 254.53], index=times)
zenith = pd.Series([10.567, 72.469], index=times)
pressure = 93193.
dirint_data = irradiance.dirint(ghi, zenith, times, pressure=pressure,
use_delta_kt_prime=False)
assert_almost_equal(dirint_data.values,
np.array([861.9, 670.4]), 1)
def test_dirint_coeffs():
coeffs = irradiance._get_dirint_coeffs()
assert coeffs[0, 0, 0, 0] == 0.385230
assert coeffs[0, 1, 2, 1] == 0.229970
assert coeffs[3, 2, 6, 3] == 1.032260
def test_dirint_min_cos_zenith_max_zenith():
# map out behavior under difficult conditions with various
# limiting kwargs settings
# times don't have any physical relevance
times = pd.DatetimeIndex(['2014-06-24T12-0700', '2014-06-24T18-0700'])
ghi = pd.Series([0, 1], index=times)
solar_zenith = pd.Series([90, 89.99], index=times)
out = irradiance.dirint(ghi, solar_zenith, times)
expected = pd.Series([0.0, 0.0], index=times, name='dni')
assert_series_equal(out, expected)
out = irradiance.dirint(ghi, solar_zenith, times, min_cos_zenith=0)
expected = pd.Series([0.0, 0.0], index=times, name='dni')
assert_series_equal(out, expected)
out = irradiance.dirint(ghi, solar_zenith, times, max_zenith=90)
expected = pd.Series([0.0, 0.0], index=times, name='dni')
assert_series_equal(out, expected, check_less_precise=True)
out = irradiance.dirint(ghi, solar_zenith, times, min_cos_zenith=0,
max_zenith=90)
expected = pd.Series([0.0, 144.264507], index=times, name='dni')
assert_series_equal(out, expected, check_less_precise=True)
out = irradiance.dirint(ghi, solar_zenith, times, min_cos_zenith=0,
max_zenith=100)
expected = pd.Series([0.0, 144.264507], index=times, name='dni')
assert_series_equal(out, expected, check_less_precise=True)
def test_gti_dirint():
times = pd.DatetimeIndex(
['2014-06-24T06-0700', '2014-06-24T09-0700', '2014-06-24T12-0700'])
poa_global = np.array([20, 300, 1000])
aoi = np.array([100, 70, 10])
zenith = np.array([80, 45, 20])
azimuth = np.array([90, 135, 180])
surface_tilt = 30
surface_azimuth = 180
# test defaults
output = irradiance.gti_dirint(
poa_global, aoi, zenith, azimuth, times, surface_tilt, surface_azimuth)
expected_col_order = ['ghi', 'dni', 'dhi']
expected = pd.DataFrame(array(
[[ 21.05796198, 0. , 21.05796198],
[ 288.22574368, 60.59964218, 245.37532576],
[ 931.04078010, 695.94965324, 277.06172442]]),
columns=expected_col_order, index=times)
assert_frame_equal(output, expected)
# test ignore calculate_gt_90
output = irradiance.gti_dirint(
poa_global, aoi, zenith, azimuth, times, surface_tilt, surface_azimuth,
calculate_gt_90=False)
expected_no_90 = expected.copy()
expected_no_90.iloc[0, :] = np.nan
assert_frame_equal(output, expected_no_90)
# test pressure input
pressure = 93193.
output = irradiance.gti_dirint(
poa_global, aoi, zenith, azimuth, times, surface_tilt, surface_azimuth,
pressure=pressure)
expected = pd.DataFrame(array(
[[ 21.05796198, 0. , 21.05796198],
[ 289.81109139, 60.52460392, 247.01373353],
[ 932.46756378, 648.05001357, 323.49974813]]),
columns=expected_col_order, index=times)
assert_frame_equal(output, expected)
# test albedo input
albedo = 0.05
output = irradiance.gti_dirint(
poa_global, aoi, zenith, azimuth, times, surface_tilt, surface_azimuth,
albedo=albedo)
expected = pd.DataFrame(array(
[[ 21.3592591, 0. , 21.3592591 ],
[ 292.5162373, 64.42628826, 246.95997198],
[ 941.6753031, 727.16311901, 258.36548605]]),
columns=expected_col_order, index=times)
assert_frame_equal(output, expected)
# test temp_dew input
temp_dew = np.array([70, 80, 20])
output = irradiance.gti_dirint(
poa_global, aoi, zenith, azimuth, times, surface_tilt, surface_azimuth,
temp_dew=temp_dew)
expected = pd.DataFrame(array(
[[ 21.05796198, 0. , 21.05796198],
[ 292.40468994, 36.79559287, 266.3862767 ],
[ 931.79627208, 689.81549269, 283.5817439]]),
columns=expected_col_order, index=times)
assert_frame_equal(output, expected)
def test_erbs():
index = pd.DatetimeIndex(['20190101']*3 + ['20190620'])
ghi = pd.Series([0, 50, 1000, 1000], index=index)
zenith = pd.Series([120, 85, 10, 10], index=index)
expected = pd.DataFrame(np.array(
[[0.00000000e+00, 0.00000000e+00, 0.00000000e+00],
[9.67192672e+01, 4.15703604e+01, 4.05723511e-01],
[7.94205651e+02, 2.17860117e+02, 7.18132729e-01],
[8.42001578e+02, 1.70790318e+02, 7.68214312e-01]]),
columns=['dni', 'dhi', 'kt'], index=index)
out = irradiance.erbs(ghi, zenith, index)
assert_frame_equal(np.round(out, 0), np.round(expected, 0))
def test_erbs_min_cos_zenith_max_zenith():
# map out behavior under difficult conditions with various
# limiting kwargs settings
columns = ['dni', 'dhi', 'kt']
times = pd.DatetimeIndex(['2016-07-19 06:11:00'], tz='America/Phoenix')
# max_zenith keeps these results reasonable
out = irradiance.erbs(ghi=1.0, zenith=89.99999,
datetime_or_doy=times, min_cos_zenith=0)
expected = pd.DataFrame(np.array(
[[0., 1., 1.]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
# 4-5 9s will produce bad behavior without max_zenith limit
out = irradiance.erbs(ghi=1.0, zenith=89.99999,
datetime_or_doy=times, max_zenith=100)
expected = pd.DataFrame(np.array(
[[6.00115286e+03, 9.98952601e-01, 1.16377640e-02]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
# 1-2 9s will produce bad behavior without either limit
out = irradiance.erbs(ghi=1.0, zenith=89.99, datetime_or_doy=times,
min_cos_zenith=0, max_zenith=100)
expected = pd.DataFrame(np.array(
[[4.78419761e+03, 1.65000000e-01, 1.00000000e+00]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
# check default behavior under hardest condition
out = irradiance.erbs(ghi=1.0, zenith=90, datetime_or_doy=times)
expected = pd.DataFrame(np.array(
[[0., 1., 0.01163776]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
def test_erbs_all_scalar():
ghi = 1000
zenith = 10
doy = 180
expected = OrderedDict()
expected['dni'] = 8.42358014e+02
expected['dhi'] = 1.70439297e+02
expected['kt'] = 7.68919470e-01
out = irradiance.erbs(ghi, zenith, doy)
for k, v in out.items():
assert_allclose(v, expected[k], 5)
def test_dirindex(times):
ghi = pd.Series([0, 0, 1038.62, 254.53], index=times)
ghi_clearsky = pd.Series(
np.array([0., 79.73860422, 1042.48031487, 257.20751138]),
index=times
)
dni_clearsky = pd.Series(
np.array([0., 316.1949056, 939.95469881, 646.22886049]),
index=times
)
zenith = pd.Series(
np.array([124.0390863, 82.85457044, 10.56413562, 72.41687122]),
index=times
)
pressure = 93193.
tdew = 10.
out = irradiance.dirindex(ghi, ghi_clearsky, dni_clearsky,
zenith, times, pressure=pressure,
temp_dew=tdew)
dirint_close_values = irradiance.dirint(ghi, zenith, times,
pressure=pressure,
use_delta_kt_prime=True,
temp_dew=tdew).values
expected_out = np.array([np.nan, 0., 748.31562753, 630.72592644])
tolerance = 1e-8
assert np.allclose(out, expected_out, rtol=tolerance, atol=0,
equal_nan=True)
tol_dirint = 0.2
assert np.allclose(out.values, dirint_close_values, rtol=tol_dirint, atol=0,
equal_nan=True)
def test_dirindex_min_cos_zenith_max_zenith():
# map out behavior under difficult conditions with various
# limiting kwargs settings
# times don't have any physical relevance
times = pd.DatetimeIndex(['2014-06-24T12-0700', '2014-06-24T18-0700'])
ghi = pd.Series([0, 1], index=times)
ghi_clearsky = pd.Series([0, 1], index=times)
dni_clearsky = pd.Series([0, 5], index=times)
solar_zenith = pd.Series([90, 89.99], index=times)
out = irradiance.dirindex(ghi, ghi_clearsky, dni_clearsky, solar_zenith,
times)
expected = pd.Series([nan, nan], index=times)
assert_series_equal(out, expected)
out = irradiance.dirindex(ghi, ghi_clearsky, dni_clearsky, solar_zenith,
times, min_cos_zenith=0)
expected = pd.Series([nan, nan], index=times)
assert_series_equal(out, expected)
out = irradiance.dirindex(ghi, ghi_clearsky, dni_clearsky, solar_zenith,
times, max_zenith=90)
expected = pd.Series([nan, nan], index=times)
assert_series_equal(out, expected)
out = irradiance.dirindex(ghi, ghi_clearsky, dni_clearsky, solar_zenith,
times, min_cos_zenith=0, max_zenith=100)
expected = pd.Series([nan, 5.], index=times)
assert_series_equal(out, expected)
def test_dni():
ghi = pd.Series([90, 100, 100, 100, 100])
dhi = pd.Series([100, 90, 50, 50, 50])
zenith = pd.Series([80, 100, 85, 70, 85])
clearsky_dni = pd.Series([50, 50, 200, 50, 300])
dni = irradiance.dni(ghi, dhi, zenith,
clearsky_dni=clearsky_dni, clearsky_tolerance=2)
assert_series_equal(dni,
pd.Series([float('nan'), float('nan'), 400,
146.190220008, 573.685662283]))
dni = irradiance.dni(ghi, dhi, zenith)
assert_series_equal(dni,
pd.Series([float('nan'), float('nan'), 573.685662283,
146.190220008, 573.685662283]))
@pytest.mark.parametrize(
'surface_tilt,surface_azimuth,solar_zenith,' +
'solar_azimuth,aoi_expected,aoi_proj_expected',
[(0, 0, 0, 0, 0, 1),
(30, 180, 30, 180, 0, 1),
(30, 180, 150, 0, 180, -1),
(90, 0, 30, 60, 75.5224878, 0.25),
(90, 0, 30, 170, 119.4987042, -0.4924038)])
def test_aoi_and_aoi_projection(surface_tilt, surface_azimuth, solar_zenith,
solar_azimuth, aoi_expected,
aoi_proj_expected):
aoi = irradiance.aoi(surface_tilt, surface_azimuth, solar_zenith,
solar_azimuth)
assert_allclose(aoi, aoi_expected, atol=1e-6)
aoi_projection = irradiance.aoi_projection(
surface_tilt, surface_azimuth, solar_zenith, solar_azimuth)
assert_allclose(aoi_projection, aoi_proj_expected, atol=1e-6)
@pytest.fixture
def airmass_kt():
# disc algorithm stopped at am=12. test am > 12 for out of range behavior
return np.array([1, 5, 12, 20])
def test_kt_kt_prime_factor(airmass_kt):
out = irradiance._kt_kt_prime_factor(airmass_kt)
expected = np.array([ 0.999971, 0.723088, 0.548811, 0.471068])
assert_allclose(out, expected, atol=1e-5)
def test_clearsky_index():
ghi = np.array([-1., 0., 1., 500., 1000., np.nan])
ghi_measured, ghi_modeled = np.meshgrid(ghi, ghi)
# default max_clearsky_index
with np.errstate(invalid='ignore', divide='ignore'):
out = irradiance.clearsky_index(ghi_measured, ghi_modeled)
expected = np.array(
[[1. , 0. , 0. , 0. , 0. , np.nan],
[0. , 0. , 0. , 0. , 0. , np.nan],
[0. , 0. , 1. , 2. , 2. , np.nan],
[0. , 0. , 0.002 , 1. , 2. , np.nan],
[0. , 0. , 0.001 , 0.5 , 1. , np.nan],
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]])
assert_allclose(out, expected, atol=0.001)
# specify max_clearsky_index
with np.errstate(invalid='ignore', divide='ignore'):
out = irradiance.clearsky_index(ghi_measured, ghi_modeled,
max_clearsky_index=1.5)
expected = np.array(
[[1. , 0. , 0. , 0. , 0. , np.nan],
[0. , 0. , 0. , 0. , 0. , np.nan],
[0. , 0. , 1. , 1.5 , 1.5 , np.nan],
[0. , 0. , 0.002 , 1. , 1.5 , np.nan],
[0. , 0. , 0.001 , 0.5 , 1. , np.nan],
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]])
assert_allclose(out, expected, atol=0.001)
# scalars
out = irradiance.clearsky_index(10, 1000)
expected = 0.01
assert_allclose(out, expected, atol=0.001)
# series
times = pd.date_range(start='20180601', periods=2, freq='12H')
ghi_measured = pd.Series([100, 500], index=times)
ghi_modeled = pd.Series([500, 1000], index=times)
out = irradiance.clearsky_index(ghi_measured, ghi_modeled)
expected = pd.Series([0.2, 0.5], index=times)
assert_series_equal(out, expected)
def test_clearness_index():
ghi = np.array([-1, 0, 1, 1000])
solar_zenith = np.array([180, 90, 89.999, 0])
ghi, solar_zenith = np.meshgrid(ghi, solar_zenith)
# default min_cos_zenith
out = irradiance.clearness_index(ghi, solar_zenith, 1370)
# np.set_printoptions(precision=3, floatmode='maxprec', suppress=True)
expected = np.array(
[[0. , 0. , 0.011, 2. ],
[0. , 0. , 0.011, 2. ],
[0. , 0. , 0.011, 2. ],
[0. , 0. , 0.001, 0.73 ]])
assert_allclose(out, expected, atol=0.001)
# specify min_cos_zenith
with np.errstate(invalid='ignore', divide='ignore'):
out = irradiance.clearness_index(ghi, solar_zenith, 1400,
min_cos_zenith=0)
expected = np.array(
[[0. , nan, 2. , 2. ],
[0. , 0. , 2. , 2. ],
[0. , 0. , 2. , 2. ],
[0. , 0. , 0.001, 0.714]])
assert_allclose(out, expected, atol=0.001)
# specify max_clearness_index
out = irradiance.clearness_index(ghi, solar_zenith, 1370,
max_clearness_index=0.82)
expected = np.array(
[[ 0. , 0. , 0.011, 0.82 ],
[ 0. , 0. , 0.011, 0.82 ],
[ 0. , 0. , 0.011, 0.82 ],
[ 0. , 0. , 0.001, 0.73 ]])
assert_allclose(out, expected, atol=0.001)
# specify min_cos_zenith and max_clearness_index
with np.errstate(invalid='ignore', divide='ignore'):
out = irradiance.clearness_index(ghi, solar_zenith, 1400,
min_cos_zenith=0,
max_clearness_index=0.82)
expected = np.array(
[[ 0. , nan, 0.82 , 0.82 ],
[ 0. , 0. , 0.82 , 0.82 ],
[ 0. , 0. , 0.82 , 0.82 ],
[ 0. , 0. , 0.001, 0.714]])
assert_allclose(out, expected, atol=0.001)
# scalars
out = irradiance.clearness_index(1000, 10, 1400)
expected = 0.725
assert_allclose(out, expected, atol=0.001)
# series
times = pd.date_range(start='20180601', periods=2, freq='12H')
ghi = pd.Series([0, 1000], index=times)
solar_zenith = pd.Series([90, 0], index=times)
extra_radiation = pd.Series([1360, 1400], index=times)
out = irradiance.clearness_index(ghi, solar_zenith, extra_radiation)
expected = pd.Series([0, 0.714285714286], index=times)
assert_series_equal(out, expected)
def test_clearness_index_zenith_independent(airmass_kt):
clearness_index = np.array([-1, 0, .1, 1])
clearness_index, airmass_kt = np.meshgrid(clearness_index, airmass_kt)
out = irradiance.clearness_index_zenith_independent(clearness_index,
airmass_kt)
expected = np.array(
[[0. , 0. , 0.1 , 1. ],
[0. , 0. , 0.138, 1.383],
[0. , 0. , 0.182, 1.822],
[0. , 0. , 0.212, 2. ]])
assert_allclose(out, expected, atol=0.001)
# test max_clearness_index
out = irradiance.clearness_index_zenith_independent(
clearness_index, airmass_kt, max_clearness_index=0.82)
expected = np.array(
[[ 0. , 0. , 0.1 , 0.82 ],
[ 0. , 0. , 0.138, 0.82 ],
[ 0. , 0. , 0.182, 0.82 ],
[ 0. , 0. , 0.212, 0.82 ]])
assert_allclose(out, expected, atol=0.001)
# scalars
out = irradiance.clearness_index_zenith_independent(.4, 2)
expected = 0.443
assert_allclose(out, expected, atol=0.001)
# series
times = pd.date_range(start='20180601', periods=2, freq='12H')
clearness_index = | pd.Series([0, .5], index=times) | pandas.Series |
# Import 311 CARE/CARE+ Requests and clean
import numpy as np
import pandas as pd
import geopandas as gpd
import intake
from shapely.geometry import Point
import boto3
catalog = intake.open_catalog('./catalogs/*.yml')
bucket_name = 's3://public-health-dashboard/'
s3 = boto3.client('s3')
df = catalog.care311.read()
for col in ['createddate', 'updateddate', 'closeddate', 'servicedate']:
df[col] = | pd.to_datetime(df[col]) | pandas.to_datetime |
"""
A module for parsing information from various files.
"""
import os
import re
from typing import Dict, List, Match, Optional, Tuple, Union
import numpy as np
import pandas as pd
import qcelemental as qcel
from arkane.exceptions import LogError
from arkane.ess import ess_factory, GaussianLog, MolproLog, OrcaLog, QChemLog, TeraChemLog
from arc.common import determine_ess, get_close_tuple, get_logger, is_same_pivot
from arc.exceptions import InputError, ParserError
from arc.species.converter import str_to_xyz, xyz_from_data
logger = get_logger()
def parse_frequencies(path: str,
software: str,
) -> np.ndarray:
"""
Parse the frequencies from a freq job output file.
Args:
path (str): The log file path.
software (str): The ESS.
Returns: np.ndarray
The parsed frequencies (in cm^-1).
"""
lines = _get_lines_from_file(path)
freqs = np.array([], np.float64)
if software.lower() == 'qchem':
for line in lines:
if ' Frequency:' in line:
items = line.split()
for i, item in enumerate(items):
if i:
freqs = np.append(freqs, [(float(item))])
elif software.lower() == 'gaussian':
with open(path, 'r') as f:
line = f.readline()
while line != '':
                # intended to capture only the last occurrence of the frequencies in the file
if 'and normal coordinates' in line:
freqs = np.array([], np.float64)
if 'Frequencies --' in line:
freqs = np.append(freqs, [float(frq) for frq in line.split()[2:]])
line = f.readline()
elif software.lower() == 'molpro':
read = False
for line in lines:
if 'Nr' in line and '[1/cm]' in line:
continue
if read:
if line == os.linesep:
read = False
continue
freqs = np.append(freqs, [float(line.split()[-1])])
if 'Low' not in line and 'Vibration' in line and 'Wavenumber' in line:
read = True
elif software.lower() == 'orca':
with open(path, 'r') as f:
line = f.readline()
read = True
while line:
if 'VIBRATIONAL FREQUENCIES' in line:
while read:
if not line.strip():
line = f.readline()
elif not line.split()[0] == '0:':
line = f.readline()
else:
read = False
while line.strip():
if float(line.split()[1]) != 0.0:
freqs = np.append(freqs, [float(line.split()[1])])
line = f.readline()
break
else:
line = f.readline()
elif software.lower() == 'terachem':
read_output = False
for line in lines:
if '=== Mode' in line:
# example: '=== Mode 1: 1198.526 cm^-1 ==='
freqs = np.append(freqs, [float(line.split()[3])])
elif 'Vibrational Frequencies/Thermochemical Analysis After Removing Rotation and Translation' in line:
read_output = True
continue
elif read_output:
if 'Temperature (Kelvin):' in line or 'Frequency(cm-1)' in line:
continue
if not line.strip():
break
# example:
# 'Mode Eigenvalue(AU) Frequency(cm-1) Intensity(km/mol) Vib.Temp(K) ZPE(AU) ...'
# ' 1 0.0331810528 170.5666870932 52.2294230772 245.3982965841 0.0003885795 ...'
freqs = np.append(freqs, [float(line.split()[2])])
else:
raise ParserError(f'parse_frequencies() can currently only parse Gaussian, Molpro, Orca, QChem and TeraChem '
f'files, got {software}')
logger.debug(f'Using parser.parse_frequencies(). Determined frequencies are: {freqs}')
return freqs
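# Example (illustrative sketch; 'freq_a1027.out' is a hypothetical Gaussian frequency job output):
#     freqs = parse_frequencies(path='freq_a1027.out', software='gaussian')
#     # freqs is a np.ndarray of harmonic frequencies in cm^-1, e.g., array([170.57, 1198.53, ...])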
def parse_normal_displacement_modes(path: str,
software: Optional[str] = None,
) -> Tuple[np.ndarray, np.ndarray]:
"""
Parse frequencies and normal displacement modes.
Args:
path (str): The path to the log file.
software (str, optional): The software to used to generate the log file.
Raises:
NotImplementedError: If the parser is not implemented for the ESS this log file belongs to.
Returns: Tuple[np.ndarray, np.ndarray]
The frequencies (in cm^-1) and The normal displacement modes.
"""
software = software or determine_ess(path)
freqs, normal_disp_modes, normal_disp_modes_entries = list(), list(), list()
num_of_freqs_per_line = 3
with open(path, 'r') as f:
lines = f.readlines()
if software == 'gaussian':
parse, parse_normal_disp_modes = False, False
for line in lines:
if 'Harmonic frequencies (cm**-1)' in line:
# e.g.: Harmonic frequencies (cm**-1), IR intensities (KM/Mole), Raman scattering
parse = True
if parse and len(line.split()) in [0, 1, 3]:
parse_normal_disp_modes = False
normal_disp_modes.extend(normal_disp_modes_entries)
normal_disp_modes_entries = list()
if parse and 'Frequencies --' in line:
# e.g.: Frequencies -- -18.0696 127.6948 174.9499
splits = line.split()
freqs.extend(float(freq) for freq in splits[2:])
num_of_freqs_per_line = len(splits) - 2
normal_disp_modes_entries = list()
elif parse_normal_disp_modes:
# parsing, e.g.:
# Atom AN X Y Z X Y Z X Y Z
# 1 6 -0.00 0.00 -0.09 -0.00 0.00 -0.18 0.00 -0.00 -0.16
# 2 7 -0.00 0.00 -0.10 0.00 -0.00 0.02 0.00 -0.00 0.26
splits = line.split()[2:]
for i in range(num_of_freqs_per_line):
if len(normal_disp_modes_entries) < i + 1:
normal_disp_modes_entries.append(list())
normal_disp_modes_entries[i].append(splits[3 * i: 3 * i + 3])
elif parse and 'Atom AN X Y Z' in line:
parse_normal_disp_modes = True
            elif parse and (not line or '-------------------' in line):
parse = False
else:
raise NotImplementedError(f'parse_normal_displacement_modes is currently not implemented for {software}.')
freqs = np.array(freqs, np.float64)
normal_disp_modes = np.array(normal_disp_modes, np.float64)
return freqs, normal_disp_modes
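# Example (illustrative sketch; 'freq_ts_a7.log' is a hypothetical path; array shapes depend on the molecule):
#     freqs, modes = parse_normal_displacement_modes(path='freq_ts_a7.log', software='gaussian')
#     # modes has one entry per frequency; each entry lists the Cartesian displacement of every atom.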
def parse_geometry(path: str) -> Optional[Dict[str, tuple]]:
"""
Parse the xyz geometry from an ESS log file.
Args:
path (str): The ESS log file to parse from.
Returns: Optional[Dict[str, tuple]]
The cartesian geometry.
"""
log = ess_factory(fullpath=path)
try:
coords, number, _ = log.load_geometry()
except LogError:
logger.debug(f'Could not parse xyz from {path}')
# try parsing Gaussian standard orientation instead of the input orientation parsed by Arkane
lines = _get_lines_from_file(path)
xyz_str = ''
for i in range(len(lines)):
if 'Standard orientation:' in lines[i]:
xyz_str = ''
j = i
                while j < len(lines) and not lines[j].split()[0].isdigit():
                    j += 1
                while j < len(lines) and '-------------------' not in lines[j]:
splits = lines[j].split()
xyz_str += f'{qcel.periodictable.to_E(int(splits[1]))} {splits[3]} {splits[4]} {splits[5]}\n'
j += 1
break
if xyz_str:
return str_to_xyz(xyz_str)
return None
return xyz_from_data(coords=coords, numbers=number)
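# Example (illustrative sketch; 'opt_a42.log' is a hypothetical optimization output):
#     xyz = parse_geometry(path='opt_a42.log')
#     # xyz is a coordinates dictionary (element symbols and Cartesian coordinates), or None on failure.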
def parse_t1(path: str) -> Optional[float]:
"""
Parse the T1 parameter from a Molpro or Orca coupled cluster calculation.
Args:
path (str): The ess log file path.
Returns: Optional[float]
The T1 parameter.
"""
if not os.path.isfile(path):
raise InputError('Could not find file {0}'.format(path))
log = ess_factory(fullpath=path)
try:
t1 = log.get_T1_diagnostic()
except (LogError, NotImplementedError):
logger.warning('Could not read t1 from {0}'.format(path))
t1 = None
return t1
def parse_e_elect(path: str,
zpe_scale_factor: float = 1.,
) -> Optional[float]:
"""
Parse the electronic energy from an sp job output file.
Args:
path (str): The ESS log file to parse from.
zpe_scale_factor (float): The ZPE scaling factor, used only for composite methods in Gaussian via Arkane.
Returns: Optional[float]
The electronic energy in kJ/mol.
"""
if not os.path.isfile(path):
raise InputError(f'Could not find file {path}')
log = ess_factory(fullpath=path)
try:
e_elect = log.load_energy(zpe_scale_factor) * 0.001 # convert to kJ/mol
except (LogError, NotImplementedError):
logger.warning(f'Could not read e_elect from {path}')
e_elect = None
return e_elect
def parse_zpe(path: str) -> Optional[float]:
"""
    Determine the calculated ZPE from a frequency output file.
Args:
path (str): The path to a frequency calculation output file.
Returns: Optional[float]
The calculated zero point energy in kJ/mol.
"""
if not os.path.isfile(path):
raise InputError('Could not find file {0}'.format(path))
log = ess_factory(fullpath=path)
try:
zpe = log.load_zero_point_energy() * 0.001 # convert to kJ/mol
except (LogError, NotImplementedError):
logger.warning('Could not read zpe from {0}'.format(path))
zpe = None
return zpe
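# Example (illustrative sketch of the scalar parsers above; the paths are hypothetical):
#     t1 = parse_t1('sp_cc_a3.out')          # T1 diagnostic (dimensionless), or None
#     e_elect = parse_e_elect('sp_a3.out')   # electronic energy in kJ/mol, or None
#     zpe = parse_zpe('freq_a3.out')         # zero point energy in kJ/mol, or None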
def parse_1d_scan_energies(path: str) -> Tuple[Optional[List[float]], Optional[List[float]]]:
"""
Parse the 1D torsion scan energies from an ESS log file.
Args:
path (str): The ESS log file to parse from.
Raises:
InputError: If ``path`` is invalid.
Returns: Tuple[Optional[List[float]], Optional[List[float]]]
The electronic energy in kJ/mol and the dihedral scan angle in degrees.
"""
if not os.path.isfile(path):
raise InputError(f'Could not find file {path}')
log = ess_factory(fullpath=path)
try:
energies, angles = log.load_scan_energies()
energies *= 0.001 # convert to kJ/mol
angles *= 180 / np.pi # convert to degrees
except (LogError, NotImplementedError, ZeroDivisionError):
logger.warning(f'Could not read energies from {path}')
energies, angles = None, None
return energies, angles
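# Example (illustrative sketch; 'scan_d2_a5.out' is a hypothetical 1D scan output):
#     energies, angles = parse_1d_scan_energies(path='scan_d2_a5.out')
#     # energies are in kJ/mol and angles in degrees; both are None if the scan could not be read.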
def parse_1d_scan_coords(path: str) -> List[Dict[str, tuple]]:
"""
Parse the 1D torsion scan coordinates from an ESS log file.
Args:
path (str): The ESS log file to parse from.
Returns: list
The Cartesian coordinates.
"""
lines = _get_lines_from_file(path)
log = ess_factory(fullpath=path)
if not isinstance(log, GaussianLog):
raise NotImplementedError(f'Currently parse_1d_scan_coords only supports Gaussian files, got {type(log)}')
traj = list()
done = False
i = 0
while not done:
if i >= len(lines) or 'Normal termination of Gaussian' in lines[i] or 'Error termination via' in lines[i]:
done = True
elif 'Optimization completed' in lines[i]:
            while i < len(lines) and 'Input orientation:' not in lines[i]:
                i += 1
            i += 5
            xyz_str = ''
            while i < len(lines) and '--------------------------------------------' not in lines[i]:
splits = lines[i].split()
xyz_str += f'{qcel.periodictable.to_E(int(splits[1]))} {splits[3]} {splits[4]} {splits[5]}\n'
i += 1
traj.append(str_to_xyz(xyz_str))
i += 1
return traj
def parse_nd_scan_energies(path: str,
software: Optional[str] = None,
return_original_dihedrals: bool = False,
) -> Tuple[dict, Optional[List[float]]]:
"""
Parse the ND torsion scan energies from an ESS log file.
Args:
path (str): The ESS log file to parse from.
software (str, optional): The software used to run this scan, default is 'gaussian'.
return_original_dihedrals (bool, optional): Whether to return the dihedral angles of the original conformer.
``True`` to return, default is ``False``.
Raises:
InputError: If ``path`` is invalid.
Returns: Tuple[dict, Optional[List[float]]]
The "results" dictionary, which has the following structure::
results = {'directed_scan_type': <str, used for the fig name>,
'scans': <list, entries are lists of torsion indices>,
'directed_scan': <dict, keys are tuples of '{0:.2f}' formatted dihedrals,
values are dictionaries with the following keys and values:
{'energy': <float, energy in kJ/mol>, * only this is used here
'xyz': <dict>,
'is_isomorphic': <bool>,
'trsh': <list, job.ess_trsh_methods>}>
},
        The dihedral angles of the original conformer.
"""
software = software or determine_ess(path)
results = {'directed_scan_type': f'ess_{software}',
'scans': list(),
'directed_scan': dict(),
}
if software == 'gaussian':
# internal variables:
# - scan_d_dict (dict): keys are scanning dihedral names (e.g., 'D2', or 'D4'), values are the corresponding
# torsion indices tuples (e.g., (4, 1, 2, 5), or (4, 1, 3, 6)).
# - dihedrals_dict (dict): keys are torsion tuples (e.g., (4, 1, 2, 5), or (4, 1, 3, 6)),
# values are lists of dihedral angles in degrees corresponding to the torsion
# (e.g., [-159.99700, -149.99690, -139.99694, -129.99691, -119.99693]).
# - torsions (list): entries are torsion indices that are scanned, e.g.: [(4, 1, 2, 5), (4, 1, 3, 6)]
with open(path, 'r', buffering=8192) as f:
line = f.readline()
symbols, torsions, shape, resolution, original_dihedrals = list(), list(), list(), list(), list()
scan_d_dict = dict()
min_e = None
while line:
line = f.readline()
if 'The following ModRedundant input section has been read:' in line:
# ' The following ModRedundant input section has been read:'
# ' D 4 1 2 5 S 36 10.000'
# ' D 4 1 3 6 S 36 10.000'
line = f.readline()
while True:
splits = line.split()
if len(splits) == 8:
torsions.append(tuple([int(index) for index in splits[1:5]]))
shape.append(int(splits[6]) + 1) # the last point is repeated
resolution.append(float(splits[7]))
else:
break
line = f.readline()
results['scans'] = torsions
if 'Symbolic Z-matrix:' in line:
# ---------------------
# HIR calculation by AI
# ---------------------
# Symbolic Z-matrix:
# Charge = 0 Multiplicity = 1
# c
# o 1 oc2
# o 1 oc3 2 oco3
# o 1 oc4 2 oco4 3 dih4 0
# h 2 ho5 1 hoc5 3 dih5 0
# h 3 ho6 1 hoc6 4 dih6 0
# Variables:
# oc2 1.36119
# oc3 1.36119
# oco3 114.896
# oc4 1.18581
# oco4 122.552
# dih4 180.
# ho5 0.9637
# hoc5 111.746
# dih5 20.003
# ho6 0.9637
# hoc6 111.746
# dih6 -160.
for i in range(2):
f.readline()
while 'Variables' not in line:
symbols.append(line.split()[0].upper())
line = f.readline()
if 'Initial Parameters' in line:
# ----------------------------
# ! Initial Parameters !
# ! (Angstroms and Degrees) !
# -------------------------- --------------------------
# ! Name Definition Value Derivative Info. !
# --------------------------------------------------------------------------------
# ! R1 R(1,2) 1.3612 calculate D2E/DX2 analytically !
# ! R2 R(1,3) 1.3612 calculate D2E/DX2 analytically !
# ! R3 R(1,4) 1.1858 calculate D2E/DX2 analytically !
# ! R4 R(2,5) 0.9637 calculate D2E/DX2 analytically !
# ! R5 R(3,6) 0.9637 calculate D2E/DX2 analytically !
# ! A1 A(2,1,3) 114.896 calculate D2E/DX2 analytically !
# ! A2 A(2,1,4) 122.552 calculate D2E/DX2 analytically !
# ! A3 A(3,1,4) 122.552 calculate D2E/DX2 analytically !
# ! A4 A(1,2,5) 111.746 calculate D2E/DX2 analytically !
# ! A5 A(1,3,6) 111.746 calculate D2E/DX2 analytically !
# ! D1 D(3,1,2,5) 20.003 calculate D2E/DX2 analytically !
# ! D2 D(4,1,2,5) -159.997 Scan !
# ! D3 D(2,1,3,6) 20.0 calculate D2E/DX2 analytically !
# ! D4 D(4,1,3,6) -160.0 Scan !
# --------------------------------------------------------------------------------
for i in range(5):
line = f.readline()
# original_zmat = {'symbols': list(), 'coords': list(), 'vars': dict()}
while '--------------------------' not in line:
splits = line.split()
# key = splits[2][:-1].replace('(', '_').replace(',', '_')
# val = float(splits[3])
# original_zmat['symbols'].append(symbols[len(original_zmat['symbols'])])
# original_zmat['vars'][key] = val
if 'Scan' in line:
scan_d_dict[splits[1]] = \
tuple([int(index) for index in splits[2][2:].replace(')', '').split(',')])
original_dihedrals.append(float(splits[3]))
line = f.readline()
elif 'Summary of Optimized Potential Surface Scan' in line:
# ' Summary of Optimized Potential Surface Scan (add -264.0 to energies):'
base_e = float(line.split('(add ')[1].split()[0])
energies, dihedrals_dict = list(), dict()
dihedral_num = 0
while 'Grad' not in line:
line = f.readline()
splits = line.split()
if 'Eigenvalues --' in line:
# convert Hartree energy to kJ/mol
energies = [(base_e + float(e)) * 4.3597447222071e-18 * 6.02214179e23 * 1e-3
for e in splits[2:]]
min_es = min(energies)
min_e = min_es if min_e is None else min(min_e, min_es)
dihedral_num = 0
if splits[0] in list(scan_d_dict.keys()) \
and scan_d_dict[splits[0]] not in list(dihedrals_dict.keys()):
# parse the dihedral information
# ' D1 20.00308 30.00361 40.05829 50.36777 61.07341'
# ' D2 -159.99700-149.99690-139.99694-129.99691-119.99693'
# ' D3 19.99992 19.99959 19.94509 19.63805 18.93967'
# ' D4 -160.00000-159.99990-159.99994-159.99991-159.99993'
dihedrals = [float(dihedral) for dihedral in line.replace('-', ' -').split()[1:]]
for i in range(len(dihedrals)):
if 0 > dihedrals[i] >= -0.0049999:
dihedrals[i] = 0.0
dihedrals_dict[scan_d_dict[splits[0]]] = dihedrals
dihedral_num += 1
if len(list(dihedrals_dict.keys())) == len(list(scan_d_dict.keys())):
# we have all the data for this block, pass to ``results`` and initialize ``dihedrals_dict``
for i, energy in enumerate(energies):
dihedral_list = [dihedrals_dict[torsion][i] for torsion in torsions] # ordered
key = tuple(f'{dihedral:.2f}' for dihedral in dihedral_list)
# overwrite previous values for a close key if found:
key = get_close_tuple(key, results['directed_scan'].keys()) or key
results['directed_scan'][key] = {'energy': energy}
dihedrals_dict = dict() # keys are torsion tuples, values are dihedral angles
break
line = f.readline()
else:
raise NotImplementedError(f'parse_nd_scan_energies is currently only implemented for Gaussian, got {software}.')
for key in results['directed_scan'].keys():
results['directed_scan'][key] = {'energy': results['directed_scan'][key]['energy'] - min_e}
if return_original_dihedrals:
return results, original_dihedrals
else:
return results, None
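# Example (illustrative sketch; 'scan_2d_a9.out' is a hypothetical Gaussian ND scan output):
#     results, _ = parse_nd_scan_energies(path='scan_2d_a9.out', software='gaussian')
#     # results['scans'] lists the scanned torsions, e.g., [(4, 1, 2, 5), (4, 1, 3, 6)];
#     # results['directed_scan'][('-160.00', '-160.00')]['energy'] gives a relative energy in kJ/mol.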
def parse_xyz_from_file(path: str) -> Optional[Dict[str, tuple]]:
"""
    Parse xyz coordinates from:
- .xyz: XYZ file
- .gjf: Gaussian input file
- .out or .log: ESS output file (Gaussian, Molpro, Orca, QChem, TeraChem) - calls parse_geometry()
- other: Molpro or QChem input file
Args:
path (str): The file path.
Raises:
ParserError: If the coordinates could not be parsed.
Returns: Optional[Dict[str, tuple]]
The parsed cartesian coordinates.
"""
lines = _get_lines_from_file(path)
file_extension = os.path.splitext(path)[1]
xyz = None
relevant_lines = list()
if file_extension == '.xyz':
for i, line in enumerate(reversed(lines)):
splits = line.strip().split()
if len(splits) == 1 and all([c.isdigit() for c in splits[0]]):
# this is the last number of atoms line (important when parsing trajectories)
num_of_atoms = int(splits[0])
break
else:
raise ParserError(f'Could not identify the number of atoms line in the xyz file {path}')
index = len(lines) - i - 1
relevant_lines = lines[index + 2: index + 2 + num_of_atoms]
elif file_extension == '.gjf':
start_parsing = False
for line in lines:
if start_parsing and line and line != '\n' and line != '\r\n':
relevant_lines.append(line)
elif start_parsing:
break
else:
splits = line.split()
if len(splits) == 2 and all([s.isdigit() for s in splits]):
start_parsing = True
elif 'out' in file_extension or 'log' in file_extension:
xyz = parse_geometry(path)
else:
record = False
for line in lines:
if '$end' in line or '}' in line:
break
if record and len(line.split()) == 4:
relevant_lines.append(line)
elif '$molecule' in line:
record = True
elif 'geometry={' in line:
record = True
if not relevant_lines:
raise ParserError(f'Could not parse xyz coordinates from file {path}')
if xyz is None and relevant_lines:
xyz = str_to_xyz(''.join([line for line in relevant_lines if line]))
return xyz
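# Example (illustrative sketch; the file names are hypothetical):
#     xyz_from_xyz_file = parse_xyz_from_file('conformer_0.xyz')
#     xyz_from_gjf_file = parse_xyz_from_file('input_a1.gjf')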
def parse_trajectory(path: str) -> List[Dict[str, tuple]]:
"""
Parse all geometries from an xyz trajectory file or an ESS output file.
Args:
path (str): The file path.
Raises:
ParserError: If the trajectory could not be read.
Returns: List[Dict[str, tuple]]
Entries are xyz's on the trajectory.
"""
lines = _get_lines_from_file(path)
ess_file = False
if path.split('.')[-1] != 'xyz':
try:
log = ess_factory(fullpath=path)
ess_file = True
except InputError:
ess_file = False
if ess_file:
if not isinstance(log, GaussianLog):
raise NotImplementedError(f'Currently parse_trajectory only supports Gaussian files, got {type(log)}')
traj = list()
done = False
i = 0
while not done:
if i >= len(lines) or 'Normal termination of Gaussian' in lines[i] or 'Error termination via' in lines[i]:
done = True
elif 'Input orientation:' in lines[i]:
i += 5
xyz_str = ''
                while i < len(lines) and '--------------------------------------------' not in lines[i]:
splits = lines[i].split()
xyz_str += f'{qcel.periodictable.to_E(int(splits[1]))} {splits[3]} {splits[4]} {splits[5]}\n'
i += 1
traj.append(str_to_xyz(xyz_str))
i += 1
else:
# this is not an ESS output file, probably an XYZ format file with several Cartesian coordinates
skip_line = False
num_of_atoms = 0
traj, xyz_lines = list(), list()
for line in lines:
splits = line.strip().split()
if len(splits) == 1 and all([c.isdigit() for c in splits[0]]):
if len(xyz_lines):
if len(xyz_lines) != num_of_atoms:
raise ParserError(f'Could not parse trajectory, expected {num_of_atoms} atoms, '
f'but got {len(xyz_lines)} for point {len(traj) + 1} in the trajectory.')
traj.append(str_to_xyz(''.join([xyz_line for xyz_line in xyz_lines])))
num_of_atoms = int(splits[0])
skip_line = True
xyz_lines = list()
elif skip_line:
# skip the comment line
skip_line = False
continue
else:
xyz_lines.append(line)
if len(xyz_lines):
# add the last point in the trajectory
if len(xyz_lines) != num_of_atoms:
raise ParserError(f'Could not parse trajectory, expected {num_of_atoms} atoms, '
f'but got {len(xyz_lines)} for point {len(traj) + 1} in the trajectory.')
traj.append(str_to_xyz(''.join([xyz_line for xyz_line in xyz_lines])))
if not len(traj):
raise ParserError(f'Could not parse trajectory from {path}')
return traj
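# Example (illustrative sketch; 'irc_forward_a2.out' is a hypothetical path):
#     traj = parse_trajectory('irc_forward_a2.out')
#     # traj is a list of coordinate dictionaries; traj[-1] is the last geometry in the file.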
def parse_dipole_moment(path: str) -> Optional[float]:
"""
Parse the dipole moment in Debye from an opt job output file.
Args:
        path (str): The ESS log file.
Returns: Optional[float]
The dipole moment in Debye.
"""
lines = _get_lines_from_file(path)
log = ess_factory(path)
dipole_moment = None
if isinstance(log, GaussianLog):
# example:
# Dipole moment (field-independent basis, Debye):
# X= -0.0000 Y= -0.0000 Z= -1.8320 Tot= 1.8320
read = False
for line in lines:
if 'dipole moment' in line.lower() and 'debye' in line.lower():
read = True
elif read:
dipole_moment = float(line.split()[-1])
read = False
elif isinstance(log, MolproLog):
# example: ' Dipole moment /Debye 2.96069859 0.00000000 0.00000000'
for line in lines:
if 'dipole moment' in line.lower() and '/debye' in line.lower():
splits = line.split()
dm_x, dm_y, dm_z = float(splits[-3]), float(splits[-2]), float(splits[-1])
dipole_moment = (dm_x ** 2 + dm_y ** 2 + dm_z ** 2) ** 0.5
elif isinstance(log, OrcaLog):
# example: 'Magnitude (Debye) : 2.11328'
for line in lines:
if 'Magnitude (Debye)' in line:
dipole_moment = float(line.split()[-1])
elif isinstance(log, QChemLog):
# example:
# Dipole Moment (Debye)
# X 0.0000 Y 0.0000 Z 2.0726
# Tot 2.0726
skip = False
read = False
for line in lines:
if 'dipole moment' in line.lower() and 'debye' in line.lower():
skip = True
elif skip:
skip = False
read = True
elif read:
dipole_moment = float(line.split()[-1])
read = False
elif isinstance(log, TeraChemLog):
# example: 'DIPOLE MOMENT: {-0.000178, -0.000003, -0.000019} (|D| = 0.000179) DEBYE'
for line in lines:
if 'dipole moment' in line.lower() and 'debye' in line.lower():
splits = line.split('{')[1].split('}')[0].replace(',', '').split()
dm_x, dm_y, dm_z = float(splits[0]), float(splits[1]), float(splits[2])
dipole_moment = (dm_x ** 2 + dm_y ** 2 + dm_z ** 2) ** 0.5
else:
raise ParserError('Currently dipole moments can only be parsed from either Gaussian, Molpro, Orca, QChem, '
'or TeraChem optimization output files')
if dipole_moment is None:
raise ParserError('Could not parse the dipole moment')
return dipole_moment
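# Example (illustrative sketch; 'opt_a3.log' is a hypothetical path):
#     dipole = parse_dipole_moment('opt_a3.log')  # total dipole moment magnitude in Debye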
def parse_polarizability(path: str) -> Optional[float]:
"""
    Parse the polarizability from a freq job output file. Returns the value in Angstrom^3.
    Args:
        path (str): The ESS log file.
Returns: Optional[float]
The polarizability in Angstrom^3.
"""
lines = _get_lines_from_file(path)
polarizability = None
for line in lines:
if 'Isotropic polarizability for W' in line:
# example: Isotropic polarizability for W= 0.000000 11.49 Bohr**3.
# 1 Bohr = 0.529177 Angstrom
polarizability = float(line.split()[-2]) * 0.529177 ** 3
return polarizability
def _get_lines_from_file(path: str) -> List[str]:
"""
A helper function for getting a list of lines from a file.
Args:
path (str): The file path.
Raises:
InputError: If the file could not be read.
Returns: List[str]
Entries are lines from the file.
"""
if os.path.isfile(path):
with open(path, 'r') as f:
lines = f.readlines()
else:
raise InputError(f'Could not find file {path}')
return lines
def process_conformers_file(conformers_path: str) -> Tuple[List[Dict[str, tuple]], List[float]]:
"""
Parse coordinates and energies from an ARC conformers file of either species or TSs.
Args:
conformers_path (str): The path to an ARC conformers file
(either a "conformers_before_optimization" or
a "conformers_after_optimization" file).
Raises:
InputError: If the file could not be found.
Returns: Tuple[List[Dict[str, tuple]], List[float]]
Conformer coordinates in a dict format, the respective energies in kJ/mol.
"""
if not os.path.isfile(conformers_path):
raise InputError('Conformers file {0} could not be found'.format(conformers_path))
with open(conformers_path, 'r') as f:
lines = f.readlines()
xyzs, energies = list(), list()
line_index = 0
while line_index < len(lines):
if 'conformer' in lines[line_index] and ':' in lines[line_index] and lines[line_index].strip()[-2].isdigit():
xyz, energy = '', None
line_index += 1
while len(lines) and line_index < len(lines) and lines[line_index].strip() \
and 'SMILES' not in lines[line_index] \
and 'energy' not in lines[line_index].lower() \
and 'guess method' not in lines[line_index].lower():
xyz += lines[line_index]
line_index += 1
while len(lines) and line_index < len(lines) and 'conformer' not in lines[line_index]:
if 'relative energy:' in lines[line_index].lower():
energy = float(lines[line_index].split()[2])
line_index += 1
xyzs.append(str_to_xyz(xyz))
energies.append(energy)
else:
line_index += 1
return xyzs, energies
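# Usage sketch (the path below is hypothetical): process_conformers_file() returns two parallel
# lists, so conformers can be paired with their relative energies (kJ/mol, possibly None).
def _example_process_conformers_file(conformers_path='conformers_after_optimization.txt'):
    """A minimal sketch of consuming process_conformers_file() output."""
    xyzs, energies = process_conformers_file(conformers_path)
    # keep only conformers for which a relative energy was parsed
    return [(xyz, e) for xyz, e in zip(xyzs, energies) if e is not None]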
def parse_str_blocks(file_path: str,
head_pat: Union[Match, str],
tail_pat: Union[Match, str],
regex: bool = True,
tail_count: int = 1,
block_count: int = 1,
) -> List[str]:
"""
Return a list of blocks defined by the head pattern and the tail pattern.
Args:
file_path (str): The path to the readable file.
head_pat (str/regex): Str pattern or regular expression of the head of the block.
        tail_pat (str/regex): Str pattern or regular expression of the tail of the block.
regex (bool, optional): Use regex (True) or str pattern (False) to search.
tail_count (int, optional): The number of times that the tail repeats.
block_count (int, optional): The max number of blocks to search. -1 for any number.
Raises:
InputError: If the file could not be found.
Returns: List[str]
List of str blocks.
"""
if not os.path.isfile(file_path):
raise InputError('Could not find file {0}'.format(file_path))
with open(file_path, 'r') as f:
blks = []
# Different search mode
if regex:
def search(x, y):
return re.search(x, y)
else:
def search(x, y):
return x in y
# 'search' for the head or 'read' until the tail
mode = 'search'
line = f.readline()
while line != '':
if mode == 'search':
# Stop searching if found enough blocks
if (len(blks)) == block_count:
break
# Check if matching the head pattern
else:
match = search(head_pat, line)
# Switch to 'read' mode
if match:
tail_repeat = 0
mode = 'read'
blks.append([])
blks[-1].append(line)
elif mode == 'read':
blks[-1].append(line)
match = search(tail_pat, line)
if match:
tail_repeat += 1
# If see enough tail patterns, switch to 'search' mode
if tail_repeat == tail_count:
mode = 'search'
line = f.readline()
# Remove the last incomplete search
if len(blks) > 0 and (tail_repeat != tail_count):
blks.pop()
return blks
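# Usage sketch (the file name and patterns are hypothetical): collect every Gaussian
# 'Input orientation' block, treating the third dashed separator after the header as the block end.
def _example_parse_str_blocks(log_path='gaussian_opt.log'):
    """A minimal sketch of parse_str_blocks() with plain-string (non-regex) patterns."""
    blocks = parse_str_blocks(file_path=log_path,
                              head_pat='Input orientation:',
                              tail_pat='---------------------------------------------',
                              regex=False,
                              tail_count=3,
                              block_count=-1)  # -1 collects every matching block
    return [''.join(block) for block in blocks]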
def parse_scan_args(file_path: str) -> dict:
"""
Get the scan arguments, including which internal coordinates (IC) are being scanned, which are frozen,
what is the step size and the number of atoms, etc.
Args:
file_path (str): The path to a readable output file.
Raises:
        NotImplementedError: If a file other than a Gaussian log file is given as input.
Returns: dict
A dictionary that contains the scan arguments as well as step number, step size, number of atom::
{'scan': <list, atom indexes of the torsion to be scanned>,
'freeze': <list, list of internal coordinates identified by atom indexes>,
'step': <int, number of steps to scan>,
'step_size': <float, the size of each step>,
'n_atom': <int, the number of atoms of the molecule>,
}
"""
log = ess_factory(fullpath=file_path)
scan_args = {'scan': None, 'freeze': [],
'step': 0, 'step_size': 0, 'n_atom': 0}
if isinstance(log, GaussianLog):
try:
# g09, g16
scan_blk = parse_str_blocks(file_path, 'The following ModRedundant input section has been read:',
'Isotopes and Nuclear Properties', regex=False)[0][1:-1]
except IndexError: # Cannot find any block
# g03
scan_blk_1 = parse_str_blocks(file_path, 'The following ModRedundant input section has been read:',
'GradGradGradGrad', regex=False)[0][1:-2]
scan_blk_2 = parse_str_blocks(file_path, 'NAtoms=',
'One-electron integrals computed', regex=False)[0][:1]
scan_blk = scan_blk_1 + scan_blk_2
scan_pat = r'[DBA]?(\s+\d+){2,4}\s+S\s+\d+[\s\d.]+'
frz_pat = r'[DBA]?(\s+\d+){2,4}\s+F'
value_pat = r'[\d.]+'
for line in scan_blk:
if re.search(scan_pat, line.strip()):
values = re.findall(value_pat, line)
scan_len = len(values) - 2 # atom indexes + step + stepsize
scan_args['scan'] = [int(values[i]) for i in range(scan_len)]
scan_args['step'] = int(values[-2])
scan_args['step_size'] = float(values[-1])
if re.search(frz_pat, line.strip()):
values = re.findall(value_pat, line)
scan_args['freeze'].append([int(values[i]) for i in range(len(values))])
if 'NAtoms' in line:
scan_args['n_atom'] = int(line.split()[1])
else:
raise NotImplementedError(f'parse_scan_args() can currently only parse Gaussian output '
f'files, got {log}')
return scan_args
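# Usage sketch (the log name is hypothetical): for a relaxed dihedral scan the returned dict is
# expected to look like {'scan': [1, 2, 3, 5], 'freeze': [], 'step': 36, 'step_size': 10.0, 'n_atom': 9}.
def _example_parse_scan_args(scan_log='dihedral_scan.log'):
    """A minimal sketch of reading the scanned torsion and total scan angle."""
    scan_args = parse_scan_args(scan_log)
    torsion_atoms = scan_args['scan']                          # 1-indexed atom numbers
    total_angle = scan_args['step'] * scan_args['step_size']   # e.g. 36 * 10.0 = 360 degrees
    return torsion_atoms, total_angle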
def parse_ic_info(file_path: str) -> pd.DataFrame:
"""
Get the information of internal coordinates (ic) of an intermediate scan conformer.
Args:
file_path (str): The path to a readable output file.
Raises:
        NotImplementedError: If a file other than a Gaussian log file is given as input.
Returns: pd.DataFrame
A DataFrame containing the information of the internal coordinates
"""
log = ess_factory(fullpath=file_path)
ic_dict = {item: []
for item in ['label', 'type', 'atoms', 'redundant', 'scan']}
scan_args = parse_scan_args(file_path)
max_atom_ind = scan_args['n_atom']
if isinstance(log, GaussianLog):
ic_info_block = parse_str_blocks(file_path, 'Initial Parameters', '-----------', regex=False,
tail_count=3)[0][5:-1]
for line in ic_info_block:
# Line example with split() indices:
# 0 1 2 3 4 5 6 7
# ! R1 R(1, 2) 1.3581 calculate D2E/DX2 analytically !
terms = line.split()
ic_dict['label'].append(terms[1])
            ic_dict['type'].append(terms[1][0])  # R: bond, A: angle, D: dihedral
atom_inds = re.split(r'[(),]', terms[2])[1:-1]
ic_dict['atoms'].append([int(atom_ind) for atom_ind in atom_inds])
# Identify redundant, cases like 5 atom angles or redundant atoms
if (ic_dict['type'][-1] == 'A' and len(atom_inds) > 3) \
or (ic_dict['type'][-1] == 'R' and len(atom_inds) > 2) \
or (ic_dict['type'][-1] == 'D' and len(atom_inds) > 4):
ic_dict['redundant'].append(True)
else:
# Sometimes, redundant atoms with weird indices are added.
# Reason unclear. Maybe to better define the molecule, or to
# solve equations more easily.
weird_indices = [index for index in ic_dict['atoms'][-1]
if index <= 0 or index > max_atom_ind]
if weird_indices:
ic_dict['redundant'].append(True)
else:
ic_dict['redundant'].append(False)
# Identify ics being scanned
if len(scan_args['scan']) == len(atom_inds) == 4 \
and is_same_pivot(scan_args['scan'], ic_dict['atoms'][-1]):
ic_dict['scan'].append(True)
elif len(scan_args['scan']) == len(atom_inds) == 2 \
and set(scan_args['scan']) == set(ic_dict['atoms'][-1]):
ic_dict['scan'].append(True)
else:
# Currently doesn't support scan of angles
ic_dict['scan'].append(False)
else:
raise NotImplementedError(f'parse_ic_info() can currently only parse Gaussian output '
f'files, got {log}')
    ic_info = pd.DataFrame.from_dict(ic_dict)
    return ic_info
"""Module to run a basic decision tree model
Author(s):
<NAME> (<EMAIL>)
"""
import pandas as pd
import numpy as np
import logging
from sklearn import preprocessing
from primrose.base.transformer import AbstractTransformer
class ExplicitCategoricalTransform(AbstractTransformer):
DEFAULT_NUMERIC = -9999
def __init__(self, categoricals):
"""initialize the ExplicitCategoricalTransform
Args:
categoricals: dictionary containing for each column to be transformed:
- transformations: list of strings to be executed on the data ('x' represents the current categorical variable)
- rename: if present, rename the current categorical variable to that name
- to_numeric: if true, attempt to apply to_numeric after previous transformations
"""
self.categoricals = categoricals
def fit(self, data):
pass
@staticmethod
def _process_transformations(data, input_data, categorical, x):
"""transform a column
Args:
data (dataframe): dataframe
            input_data (JSON): JSON categorical config for this variable
            categorical (str): variable name
x (str): transformation string
Returns:
data (dataframe)
"""
if "transformations" in input_data.keys():
logging.info(
"Applying key {} to variable {}".format("transformations", categorical)
)
for transformation in input_data["transformations"]:
exec(transformation.format(x=x))
@staticmethod
def _process_rename(data, input_data, categorical):
"""rename a field
Args:
data (dataframe): dataframe
            input_data (JSON): JSON categorical config for this variable
            categorical (str): variable name
Returns:
(tuple): tuple containing:
data (dataframe): dataframe
name (str): original name (if not "to_numeric": True), new_name otherwise
"""
if "rename" in input_data.keys():
logging.info("Applying key {} to variable {}".format("rename", categorical))
data = data.rename({categorical: input_data["rename"]}, axis="columns")
return data, input_data["rename"]
return data, categorical
@staticmethod
def _process_numeric(data, input_data, name):
"""convert column to numeric
Args:
data (dataframe): dataframe
            input_data (JSON): JSON categorical config for this variable
name (str): field name
Returns:
            data with the column converted to numeric
"""
if input_data.get("to_numeric", False):
logging.info("Applying key {} to variable {}".format("to_numeric", name))
# if there are errors converting to numerical values, we need to sub in a reasonable value
if sum(pd.to_numeric(data[name], errors="coerce").isnull()) > 0:
logging.info(
"Can't convert these entries in {}. Replacing with {}: {}".format(
name,
ExplicitCategoricalTransform.DEFAULT_NUMERIC,
np.unique(
data[name][
pd.to_numeric(data[name], errors="coerce").isnull()
].astype(str)
),
)
)
data[name][
pd.to_numeric(data[name], errors="coerce").isnull()
] = ExplicitCategoricalTransform.DEFAULT_NUMERIC
            try:
                data[name] = pd.to_numeric(data[name])
            except (ValueError, TypeError):
                logging.warning("Could not convert column %s to numeric", name)
        return data
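# Configuration sketch (column names, values and expressions below are hypothetical): each key of
# the `categoricals` dict names a column; `transformations` are statements in which `{x}` stands
# for that column's pandas expression, `rename` relabels the column, and `to_numeric` triggers the
# coercion handled by _process_numeric() above.
def _example_categorical_config():
    """A minimal, assumption-laden sketch of an ExplicitCategoricalTransform configuration."""
    return {
        "customer_state": {
            "transformations": ["{x} = {x}.str.strip().str.upper()"],
            "rename": "state_code",
        },
        "risk_band": {
            "transformations": ["{x} = {x}.str.replace('level_', '')"],
            "to_numeric": True,
        },
    }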
#!/usr/bin/env python3
"""Pastrami - Population scale haplotype copying script"""
__author__ = "<NAME>, <NAME>"
__copyright__ = "Copyright 2021, <NAME>, <NAME>"
__credits__ = ["<NAME>", "<NAME>", "<NAME>", "<NAME>"]
__license__ = "GPL"
__version__ = "0.3"
__maintainer__ = "<NAME>, <NAME>"
__email__ = "<EMAIL>; <EMAIL>"
__status__ = "Development"
__title__ = "pastrami.py"
# Standard modules
import logging
import math
import os.path
import pickle
import random
import re
import shlex
import shutil
import statistics
import string
import subprocess
import sys
from argparse import ArgumentParser, HelpFormatter
py_version = sys.version_info
if py_version[0] < 3 or py_version[1] < 4:
sys.exit(f"Error: {__title__} requires Python version 3.4+ to work. Please install a newer version of Python.")
# Additional installs
try:
import numpy as np
except ModuleNotFoundError as err:
sys.exit(f"Error: Numpy not found. Please install numpy prior to running {__title__}")
try:
from scipy.optimize import minimize
except ModuleNotFoundError as err:
sys.exit(f"Error: Scipy not found. Please install scipy prior to running {__title__}")
try:
import pandas as pd
except ModuleNotFoundError as err:
sys.exit(f"Error: Pandas not found. Please install pandas prior to running {__title__}")
try:
import pathos.multiprocessing as mp
except ModuleNotFoundError as err:
sys.exit(f"Error: Pathos not found. Please install pathos prior to running {__title__}")
VERSION = __version__
PROGRAM_NAME = __title__
class Colors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
class Support:
@staticmethod
def error_out(message: str = None):
if message is not None:
sys.exit(Colors.FAIL + f"Error: {message}" + Colors.ENDC)
else:
sys.exit(Colors.FAIL + "The program encountered an error and has to exit." + Colors.ENDC)
@staticmethod
def validate_file(the_file: str):
return os.path.isfile(the_file)
@staticmethod
def validate_file_size(the_file: str, fake_run: str = False):
if fake_run:
return True
else:
return os.stat(the_file).st_size > 0
@staticmethod
def validate_file_and_size_or_error(the_file: str, error_prefix: str = 'The file',
presence_suffix: str = 'doesn\'t exist',
size_suffix: str = 'is size 0', fake_run: bool = False):
if not Support.validate_file(the_file=the_file) and not fake_run:
error_message = ' '.join([error_prefix, the_file, presence_suffix])
Support.error_out(message=error_message)
if not Support.validate_file_size(the_file=the_file) and not fake_run:
error_message = ' '.join([error_prefix, the_file, size_suffix])
Support.error_out(message=error_message)
@staticmethod
def validate_dir(the_dir: str):
return os.path.isdir(the_dir)
# TODO: This is a hastily written method, needs error fixing
@staticmethod
def validate_dir_or_error(the_dir: str, error_prefix: str = "The dir", presence_suffix: str = "doesn't exist",
fake_run: bool = False):
if not Support.validate_dir(the_dir=the_dir) and not fake_run:
error_message = ' '.join([error_prefix, the_dir, presence_suffix])
Support.error_out(message=error_message)
    # TODO: Implement checks for dependency programs
@staticmethod
def check_dependencies(program_list: list = None) -> list:
errors = []
for program in program_list:
if shutil.which(program) is None:
errors.append(program)
return errors
@staticmethod
def find_plink_binary():
if shutil.which("plink") is not None:
return "plink"
elif shutil.which("plink2") is not None:
return "plink2"
else:
return None
@staticmethod
def run_command(command_str: str = None, command_list: list = None, shell=False):
if command_str is None and command_list is None:
raise ValueError("Support.run_command() was called without any command to execute.")
try:
if command_str is not None:
logging.info(f"Attempting to run: {command_str}")
output = subprocess.check_output(shlex.split(command_str), encoding="utf-8", shell=shell)
else:
logging.info(f"Attempting to run: " + " ".join([str(x) for x in command_list]))
output = subprocess.check_output(command_list, encoding="utf-8", shell=shell)
except subprocess.CalledProcessError as e:
logging.error(f"Encountered an error executing the command: ")
if command_str is not None:
logging.error(command_str)
else:
logging.error(command_list)
logging.error(f"Error details:")
logging.error(f"Exit code={e.returncode}")
logging.error(f"Error message={e.output}")
sys.exit(1)
# logging.info(f"Command output = {output}")
logging.info("Command executed without raising any exceptions")
return output
@staticmethod
def validate_filename(filename: str):
if re.match(r"^[a-zA-Z0-9_.-]+$", filename):
return True
else:
return False
@staticmethod
def validate_output_prefix(out_prefix: str):
parent, prefix = os.path.split(out_prefix)
if parent != "":
if not Support.validate_dir(parent):
Support.safe_dir_create(parent)
return Support.validate_filename(prefix)
@staticmethod
def safe_dir_create(this_dir: str):
try:
os.makedirs(this_dir)
except IOError:
print(f"I don't seem to have access to output prefix directory. Are the permissions correct?")
sys.exit(1)
@staticmethod
def safe_dir_rm(this_dir: str):
try:
os.rmdir(this_dir)
except IOError:
print(f"I don't seem to have access to output prefix directory. Are the permissions correct?")
sys.exit(1)
@staticmethod
def merge_fam_files(infile1: str, infile2: str, outputfile: str):
"""Merged two TFAM files into a single one (for aggregate function)
Parameters
----------
infile1 : str
Input TFAM file #1 (e.g., reference TFAM file)
infile2: str
Input TFAM file #2 (e.g., query TFAM file)
outputfile: str
Output TFAM file
Returns
-------
None
"""
with open(outputfile, "w") as out_handle:
with open(infile1, "r") as infile1_handle:
for line in infile1_handle:
out_handle.write(line)
with open(infile2, "r") as infile2_handle:
for line in infile2_handle:
out_handle.write(line)
@staticmethod
def create_pop_group_from_tfam(tfam_in_file: str, tsv_out_file: str):
"""Takes unique population names from the input file and make them as group
Parameters
----------
tfam_in_file : str
Input TFAM file
tsv_out_file: str
Output TSV file
Returns
-------
None
"""
unique_populations = {}
with open(tfam_in_file, "r") as f_in:
for line in f_in:
pop = line.strip().split()[0]
unique_populations[pop] = True
unique_populations = sorted(unique_populations.keys())
with open(tsv_out_file, "r") as f_out:
f_out.write("#Population\tGroup\n")
for this_pop in unique_populations:
f_out.write(f"{this_pop}\t{this_pop}\n")
@staticmethod
def init_logger(log_file, verbosity):
"""Configures the logging for printing
Returns
-------
None
Logger behavior is set based on the Inputs variable
"""
try:
logging.basicConfig(filename=log_file, filemode="w", level=logging.DEBUG,
format=f"[%(asctime)s] %(message)s",
datefmt="%m-%d-%Y %I:%M:%S %p")
except FileNotFoundError:
print(f"The supplied location for the log file '{log_file}'" +
f"doesn't exist. Please check if the location exists.")
sys.exit(1)
except IOError:
print(f"I don't seem to have access to make the log file." +
f"Are the permissions correct or is there a directory with the same name?")
sys.exit(1)
if verbosity:
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter(fmt=f"[%(asctime)s] %(message)s", datefmt="%m-%d-%Y %I:%M:%S %p")
console.setFormatter(formatter)
logging.getLogger().addHandler(console)
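# Illustration (file names are hypothetical): Support.create_pop_group_from_tfam() writes a
# two-column TSV mapping every unique first-column population in the TFAM file to itself, e.g.
#   #Population    Group
#   Yoruba         Yoruba
#   Finnish        Finnish
# The generated file can then be edited by hand to collapse populations into broader groups.
def _example_pop_group_file(tfam='reference.tfam', tsv='reference.pop_group.tsv'):
    """A minimal sketch of generating the default population-to-group map."""
    Support.create_pop_group_from_tfam(tfam_in_file=tfam, tsv_out_file=tsv)
    return tsv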
class Analysis:
chromosomes = list(range(1, 23))
fake_run = False
debug = True
min_haplotype_occurences = 0
optim_step_size = 0.0001
error_threshold = 1e-8
optim_iterations = 10
tolerance = 1e-8
ancestry_fraction_postfix = "_fractions.Q"
ancestry_painting_postfix = "_paintings.Q"
pop_estimates_postfix = "_estimates.Q"
outsourced_optimizer_pop_estimates_postfix = "_outsourcedOptimizer_estimates.Q"
finegrain_estimates_postfix = "_fine_grain_estimates.Q"
program_list = {'required': ["plink"]}
def __init__(self, opts):
# General attributes
self.threads = opts.threads
self.log_file = opts.log_file
self.verbosity = opts.verbosity
self.pool = None
# Any errors we encounter
self.errors = []
# The actual queue for the analysis
self.analysis = []
# Verbosity levels and colors
self.error_color = Colors.FAIL
self.main_process_verbosity = 1
self.warning_color = Colors.WARNING
self.warning_verbosity = 1
self.main_process_color = Colors.OKGREEN
self.sub_process_verbosity = 2
self.sub_process_color = Colors.OKBLUE
self.command_verbosity = 3
# Sub-commands
self.sub_command = opts.sub_command
self.plink_command = None
self.ancestry_infile = None
self.combined_copying_fractions = None
self.combined_output_file = None
self.fam_infile = None
self.haplotype_file = None
self.haplotypes = None
self.map_dir = None
self.max_rate = None
self.max_snps = None
self.min_snps = None
self.out_prefix = None
self.pop_group_file = None
self.query_combined_file = None
self.query_copying_fractions = None
self.query_output_file = None
self.query_prefix = None
self.query_tfam = None
self.query_tfam_file = None
self.query_tped_file = None
self.reference_copying_fractions = None
self.reference_haplotype_counts = None
self.reference_haplotype_fractions = None
self.reference_individual_populations = None
self.reference_individuals = None
self.reference_output_file = None
self.reference_pickle_output_file = None
self.reference_population_counts = None
self.reference_populations = None
self.reference_prefix = None
self.reference_tfam = None
self.reference_tfam_file = None
self.reference_tped_file = None
# All sub-command options
if self.sub_command == 'all':
# TODO: Test all subcommand
# Required options
self.reference_prefix = opts.reference_prefix
self.query_prefix = opts.query_prefix
self.out_prefix = opts.out_prefix
self.map_dir = opts.map_dir
self.haplotype_file = opts.haplotypes
self.pop_group_file = opts.pop_group_file
# Outputs to be made
self.reference_pickle_output_file = None
self.reference_output_file = None
self.reference_pickle_file = None
self.query_output_file = None
self.combined_output_file = None
self.ancestry_infile = None
self.fam_infile = None
# Hapmake
self.min_snps = opts.min_snps
self.max_snps = opts.max_snps
self.max_rate = opts.max_rate
# Build options
self.reference_tpeds = pd.Series([], dtype=pd.StringDtype())
self.reference_background = pd.Series([], dtype=pd.StringDtype())
# Query options
self.query_tpeds = pd.Series([], dtype=pd.StringDtype())
self.query_individuals = None
# aggregate options
self.ref_pop_group_map = {}
self.ind_pop_map = {}
self.pop_ind_map = {} # reverse map of ind_pop_map, sacrificing memory for speed later on
self.reference_individual_dict = {}
self.reference_pops = {}
self.ancestry_fractions = {}
self.af_header = []
self.painting_vectors = {}
self.painting_vectors_keys = []
self.fine_grain_estimates = {}
# Hapmake sub-command options
if self.sub_command == 'hapmake':
self.min_snps = opts.min_snps
self.max_snps = opts.max_snps
self.max_rate = opts.max_rate
self.map_dir = opts.map_dir
self.haplotype_file = opts.haplotypes
# Build sub-command options
if self.sub_command == 'build':
self.reference_pickle_output_file = opts.reference_pickle_out
self.reference_prefix = opts.reference_prefix
self.haplotype_file = opts.haplotypes
self.reference_output_file = opts.reference_out
self.reference_tpeds = pd.Series([], dtype=pd.StringDtype())
self.reference_background = pd.Series([], dtype=pd.StringDtype())
# Query sub-command options
if self.sub_command == 'query':
self.reference_pickle_file = opts.reference_pickle
self.query_prefix = opts.query_prefix
self.query_output_file = opts.query_out
self.combined_output_file = opts.combined_out
self.query_tpeds = pd.Series([], dtype=pd.StringDtype())
self.query_individuals = None
# Co-ancestry sub-command options
if self.sub_command == 'coanc':
self.haplotype_file = opts.haplotypes
self.reference_prefix = opts.reference_prefix
self.query_prefix = opts.query_prefix
self.reference_output_file = opts.reference_out
self.query_output_file = opts.query_out
self.query_combined_file = opts.combined_out
# Aggregate sub-command options
if self.sub_command == 'aggregate':
self.ancestry_infile = opts.ancestry_infile
self.pop_group_file = opts.pop_group_file
self.out_prefix = opts.out_prefix
self.fam_infile = opts.fam_infile
self.ref_pop_group_map = {}
self.ind_pop_map = {}
self.pop_ind_map = {} # reverse map of ind_pop_map, sacrificing memory for speed later on
self.reference_individual_dict = {}
self.reference_pops = {}
self.ancestry_fractions = {}
self.af_header = []
self.painting_vectors = {}
self.painting_vectors_keys = []
self.fine_grain_estimates = {}
Support.init_logger(log_file=self.log_file, verbosity=self.verbosity)
"""
[Class section] Run control
"""
def validate_options(self):
plink_command = Support.find_plink_binary()
if plink_command is None:
self.errors += ["Can't find plink or plink2. Please make sure the binary exists as one of those two names"]
else:
self.plink_command = plink_command
if self.sub_command == "all":
self.validate_and_set_all_subcommand_options()
# self.analysis += ['build_reference_set', 'query_reference_set', 'build_coanc', 'post_pastrami']
self.analysis += ['build_reference_set', 'query_reference_set', 'post_pastrami']
if self.sub_command == 'hapmake':
self.validate_map_dir()
self.analysis += ['make_haplotypes']
if self.sub_command == 'build':
self.validate_reference_prefix()
self.validate_haplotypes()
self.analysis += ['build_reference_set']
if self.sub_command == 'query':
self.validate_reference_pickle()
self.validate_query_prefix()
self.analysis += ['query_reference_set']
if self.sub_command == 'coanc':
self.validate_reference_prefix()
if self.query_prefix is not None:
self.validate_query_prefix()
self.validate_haplotypes()
self.analysis += ['build_coanc']
if self.sub_command == 'aggregate':
self.validate_ancestry_infile()
self.validate_pop_group_file()
self.validate_fam_infile()
self.analysis += ['post_pastrami']
if len(self.analysis) == 0:
self.errors = self.errors + ['Nothing to do!']
def __str__(self):
long_string = f"""
Class constants:
chromosomes = {Analysis.chromosomes}
fake_run = {Analysis.fake_run}
debug = {Analysis.debug}
min_haplotype_occurences = {Analysis.min_haplotype_occurences}
optim_step_size = {Analysis.optim_step_size}
error_threshold = {Analysis.error_threshold}
optim_iterations = {Analysis.optim_iterations}
tolerance = {Analysis.tolerance}
ancestry_fraction_postfix = {Analysis.ancestry_fraction_postfix}
ancestry_painting_postfix = {Analysis.ancestry_painting_postfix}
pop_estimates_postfix = {Analysis.pop_estimates_postfix}
finegrain_estimates_postfix = {Analysis.finegrain_estimates_postfix}
Instance variables:
* General program parameters
log_file = {self.log_file}
threads = {self.threads}
verbosity = {self.verbosity}
* Verbosity options
command_verbosity = {self.command_verbosity}
main_process_verbosity = {self.main_process_verbosity}
sub_process_verbosity = {self.sub_process_verbosity}
warning_verbosity = {self.warning_verbosity}
* Subcommand to be executed
sub_command = {self.sub_command}
* Hapmake-specific parameter options
max_rate = {self.max_rate}
max_snps = {self.max_snps}
min_snps = {self.min_snps}
* Hapmake-specific input/output options
out_prefix = {self.out_prefix}
map_dir = {self.map_dir}
haplotype_file = {self.haplotype_file}
* Query files input/output options
query_prefix = {self.query_prefix}
query_tfam_file = {self.query_tfam_file}
query_tped_file = {self.query_tped_file}
query_output_file = {self.query_output_file}
query_combined_file = {self.query_combined_file}
* Reference files input/output options
reference_prefix = {self.reference_prefix}
reference_tfam_file = {self.reference_tfam_file}
reference_tped_file = {self.reference_tped_file}
reference_output_file = {self.reference_output_file}
reference_pickle_output_file = {self.reference_pickle_output_file}
* Combined query-reference file location
combined_output_file = {self.combined_output_file}
* Aggregate-specific options
pop_group_file = {self.pop_group_file}
ancestry_infile = {self.ancestry_infile}
fam_infile = {self.fam_infile}
"""
return long_string
# TODO: Print a summary of what parameters were provided, what needs to be performed
def summarize_run(self):
logging.info(self.main_process_color + str(self) + Colors.ENDC)
logging.info(self.main_process_color + f"Analysis to perform: " + ",".join(self.analysis) + Colors.ENDC)
def go(self):
self.summarize_run()
# self.pool = mp.Pool(processes=self.threads)
self.pool = mp.ProcessingPool(nodes=self.threads)
while True:
step = self.analysis[0]
self.analysis = self.analysis[1:]
function = getattr(self, step)
function()
if len(self.analysis) == 0:
break
# self.pool.terminate()
"""
[Class section] Functions for validating file
"""
def validate_and_set_all_subcommand_options(self):
self.validate_reference_prefix()
self.validate_query_prefix()
Support.validate_output_prefix(self.out_prefix)
if self.haplotype_file is None:
if self.map_dir is None:
self.errors += [self.sub_command + ' requires --haplotypes or --map-dir']
return
else:
self.validate_map_dir()
self.haplotype_file = self.out_prefix + ".hap"
if self.pop_group_file is None:
self.pop_group_file = self.out_prefix + ".pop_group.tsv"
Support.create_pop_group_from_tfam(tfam_in_file=self.reference_tfam_file, tsv_out_file=self.pop_group_file)
else:
self.validate_pop_group_file()
self.reference_pickle_output_file = self.out_prefix + ".pickle"
self.reference_output_file = self.out_prefix + ".hap"
self.reference_pickle_file = self.reference_pickle_output_file
self.query_output_file = self.out_prefix + "_query.tsv"
self.combined_output_file = self.out_prefix + ".tsv"
self.ancestry_infile = self.combined_output_file
self.fam_infile = self.out_prefix + ".fam"
Support.merge_fam_files(infile1=self.query_tfam_file,
infile2=self.reference_tfam_file,
outputfile=self.fam_infile)
def validate_haplotypes(self):
if self.haplotype_file is None:
self.errors += [self.sub_command + ' requires --haplotypes']
return
Support.validate_file_and_size_or_error(the_file=self.haplotype_file, error_prefix='Haplotype file',
fake_run=self.fake_run)
def validate_reference_prefix(self):
if self.reference_prefix is None:
self.errors += [self.sub_command + ' requires --reference-prefix']
return
self.reference_tped_file = self.reference_prefix + '.tped'
self.reference_tfam_file = self.reference_prefix + '.tfam'
for i in [self.reference_tped_file, self.reference_tfam_file]:
Support.validate_file_and_size_or_error(the_file=i, fake_run=self.fake_run)
def validate_query_prefix(self):
if self.query_prefix is None:
self.errors += [self.sub_command + ' requires --query-prefix']
return
self.query_tped_file = self.query_prefix + '.tped'
self.query_tfam_file = self.query_prefix + '.tfam'
for i in [self.query_tped_file, self.query_tfam_file]:
Support.validate_file_and_size_or_error(the_file=i, fake_run=self.fake_run)
def validate_reference_pickle(self):
if self.reference_pickle_file is None:
self.errors += [self.sub_command + ' requires --query-prefix']
return
Support.validate_file_and_size_or_error(the_file=self.reference_pickle_file,
error_prefix='Reference pickle', fake_run=self.fake_run)
def validate_map_dir(self):
if self.map_dir is None:
self.errors += [self.sub_command + ' requires --map-dir']
return
Support.validate_dir_or_error(the_dir=self.map_dir, error_prefix='Map directory', fake_run=self.fake_run)
def validate_ancestry_infile(self):
if self.ancestry_infile is None:
self.errors += [self.sub_command + ' requires --pastrami-output']
return
Support.validate_file_and_size_or_error(the_file=self.ancestry_infile,
error_prefix='Pastrami\' query output',
fake_run=self.fake_run)
# TODO: If user doesn't supply pop-group file, create one based on the TFAM file
def validate_pop_group_file(self):
if self.pop_group_file is None:
self.errors += [self.sub_command + ' requires --pop-group']
return
Support.validate_file_and_size_or_error(the_file=self.pop_group_file,
error_prefix='Population group mapping file',
fake_run=self.fake_run)
def validate_fam_infile(self):
if self.fam_infile is None:
self.errors += [self.sub_command + ' requires --pastrami-fam']
return
Support.validate_file_and_size_or_error(the_file=self.fam_infile,
error_prefix='FAM input',
fake_run=self.fake_run)
"""
[Class section] Haplotype maker
"""
def process_hapmap_file(self, chrom: int):
logging.info(f"[hapmake|chr{chrom}] Started processing")
haplotypes = ""
map_data = []
with open(os.path.join(self.map_dir, f"chr{chrom}.map"), "r") as f:
for line in f:
(position, cmorgan, snp) = line.rstrip().split("\t")
map_data.append([int(position), float(cmorgan), snp])
logging.info(f"[hapmake|chr{chrom}] File read")
left_snp = 0
right_snp = 0
snps = False
for row in range(len(map_data)):
right_snp += 1
if right_snp >= len(map_data):
break
            # If the two SNPs have a recombination rate greater than the max rate
if map_data[right_snp][1] - map_data[left_snp][1] >= self.max_rate:
if right_snp - left_snp >= self.min_snps:
snps = True
else:
left_snp = right_snp
right_snp += 1
# If the haplotype is long enough
if right_snp - left_snp >= self.max_snps:
snps = True
# If snps isn't False, then save the range of the window
if snps is True:
haplotypes += f"{chrom}\t{left_snp}\t{right_snp}\t{map_data[right_snp - 2][1] - map_data[left_snp][1]}\n"
snps = False
left_snp = right_snp
logging.info(f"[hapmake|chr{chrom}] All haplotypes discovered")
return haplotypes
def make_haplotypes(self):
logging.info(f"[hapmake|chrAll] Starting pool for processing")
# pool = mp.Pool(processes=self.threads)
# self.pool.restart()
results = self.pool.map(self.process_hapmap_file, range(1, 23))
# results.wait()
# results = results.get()
with open(self.haplotype_file, "w") as f:
f.write("".join(results) + "\n")
logging.info(f"[hapmake|chrAll] Files written")
# self.pool.close()
# self.pool.join()
# self.pool.terminate()
"""
[Class section] Core Pastrami code - to be fragmented further in future
"""
def load_reference_pickle(self):
logging.info('Loading reference pickle ' + self.reference_pickle_file)
pickle_file_handle = open(self.reference_pickle_file, 'rb')
old_pickle = pickle.load(pickle_file_handle)
self.reference_tfam = old_pickle.reference_tfam
# self.reference_haplotype_counts = old_pickle.reference_haplotype_counts
self.reference_haplotype_fractions = old_pickle.reference_haplotype_fractions
self.reference_populations = old_pickle.reference_populations
self.reference_background = old_pickle.reference_background
self.haplotypes = old_pickle.haplotypes
self.reference_copying_fractions = old_pickle.reference_copying_fractions
del old_pickle
logging.info('Reference pickle file loaded!')
def load_reference_tfam(self):
        self.reference_tfam = pd.read_table(self.reference_tfam_file, index_col=None, header=None, sep=' ')
import os
from functools import reduce
import pandas as pd
import numpy as np
from . import settings
def get_data(cryptocurrency, fillna=0):
crypto_path = os.path.join(settings.RESOURCES_DIR, cryptocurrency)
# Currency related data frames
price_df = _read_csv(os.path.join(crypto_path, 'price.csv'))
_lower_headers(price_df)
# price_df = _floaterize_prices(price_df)
price_df['date'] = pd.to_datetime(price_df['date'])
transactions_df = _read_csv(os.path.join(crypto_path, 'transactions.csv'))
_lower_headers(transactions_df)
transactions_df['date'] = pd.to_datetime(transactions_df['date'])
# Forum related data frames
reply_df = _read_csv(os.path.join(crypto_path, 'reply_opinion.csv'))
_lower_headers(reply_df)
topic_df = _read_csv(os.path.join(crypto_path, 'topic_opinion.csv'))
_lower_headers(topic_df)
# Categorize vader scores
reply_df = _transform_vader_series(reply_df, 'reply')
topic_df = _transform_vader_series(topic_df, 'topic')
# Drop useless columns
_drop_inplace(reply_df, ['reply', 'vader'])
_drop_inplace(topic_df, ['topic', 'reply', 'topiccontent', 'vader', 'opinion'])
# Group by date and aggregate vader categorical columns
reply_df = _fold_categorical_vader(reply_df, 'reply', by='date')
topic_df = _fold_categorical_vader(topic_df, 'topic', by='date', agg={'views':'sum'})
# Calculate daily sentiment
reply_df = _sum_categorical_vader(reply_df, 'reply')
topic_df = _sum_categorical_vader(topic_df, 'topic')
# Set date as index for forum related dfs
reply_df['date'] = pd.to_datetime(reply_df['date'])
reply_df.index = pd.DatetimeIndex(reply_df['date'])
reply_df = reply_df.drop(columns='date')
    topic_df['date'] = pd.to_datetime(topic_df['date'])
import pandas as pd
c1 = pd.read_csv('machine/Calling/Sensors_1.csv')
c2 = pd.read_csv('machine/Calling/Sensors_2.csv')
c3 = pd.read_csv('machine/Calling/Sensors_3.csv')
c4 = pd.read_csv('machine/Calling/Sensors_4.csv')
c5 = pd.read_csv('machine/Calling/Sensors_5.csv')
c6 = pd.read_csv('machine/Calling/Sensors_6.csv')
c7 = pd.read_csv('machine/Calling/Sensors_7.csv')
c8 = pd.read_csv('machine/Calling/Sensors_8.csv')
c9 = pd.read_csv('machine/Calling/Sensors_9.csv')
c10 = pd.read_csv('machine/Calling/Sensors_10.csv')
calling = pd.concat([c1,c2,c3,c4,c5,c6,c7,c8,c9,c10], axis = 0)
t1 = pd.read_csv('machine/Texting/Sensors_1.csv')
t2 = pd.read_csv('machine/Texting/Sensors_2.csv')
t3 = pd.read_csv('machine/Texting/Sensors_3.csv')
t4 = pd.read_csv('machine/Texting/Sensors_4.csv')
t5 = pd.read_csv('machine/Texting/Sensors_5.csv')
t6 = pd.read_csv('machine/Texting/Sensors_6.csv')
t7 = pd.read_csv('machine/Texting/Sensors_7.csv')
t8 = pd.read_csv('machine/Texting/Sensors_8.csv')
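# A more compact loading pattern (sketch; assumes the same machine/<Activity>/Sensors_<n>.csv
# layout used literally above; note that glob ordering may differ from the manual 1..10 order).
import glob
def _load_activity(activity):
    """Concatenate every Sensors_*.csv file found for one activity folder."""
    paths = sorted(glob.glob('machine/{}/Sensors_*.csv'.format(activity)))
    return pd.concat((pd.read_csv(p) for p in paths), axis=0, ignore_index=True)
# calling_alt = _load_activity('Calling')   # hedged: content-equivalent to the manual concat above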
import os
import minerva as mine
import pandas as pd
import random
import re
def sentence_to_conll_string(
sentence: mine.Sentence, entity_name: str, conflate: bool = False
) -> str:
words = [t.text for t in sentence]
annos = sentence.get_annotation(entity_name)
labels = ["O"] * len(words)
if annos:
ner = isinstance(annos[0], mine.TokenSpan)
if ner:
for span in annos:
entity_tag = span.value if not conflate else "Entity"
labels[span.start_index] = "B-" + entity_tag
for index in range(span.start_index + 1, span.end_index + 1):
labels[index] = "I-" + entity_tag
else:
raise NotImplementedError("Ouput for non-spans is not implemented yet.")
return "\n".join(["\t".join(line) for line in zip(words, labels)]) + "\n"
XLSX_PATH = "data/PsyTAR_dataset.xlsx"
CSV_PATH = "data/PsyTAR_binary.csv"
BINARY_PATH = "data/binary/"
CONLL_ALL_PATH = "data/all/"
CONLL_CONFLATED_PATH = "data/conflated/"
sentence_df = pd.read_excel(
XLSX_PATH, sheet_name="Sentence_Labeling", dtype={"drug_id": str, "sentences": str}
)
sentence_df = sentence_df[
["drug_id", "sentence_index", "sentences", "ADR", "WD", "EF", "INF", "SSI", "DI"]
]
sentence_df = sentence_df.dropna(subset=["sentences"])
sentence_df = sentence_df.loc[sentence_df.sentences.apply(lambda x: len(x.strip())) > 0]
sentence_df = sentence_df.fillna(0)
sentence_df[["ADR", "WD", "EF", "INF", "SSI", "DI"]] = (
sentence_df[["ADR", "WD", "EF", "INF", "SSI", "DI"]]
.replace(re.compile("[!* ]+"), 1)
.astype(int)
)
print("Writing binary datasets...")
out_df = sentence_df[["sentences", "ADR", "WD", "EF", "INF", "SSI", "DI"]].iloc[:-1]
out_df.to_csv(
BINARY_PATH + os.sep + "full.csv",
header=True,
index=False,
sep="\t",
encoding="utf8",
decimal=".",
)
train_df = out_df.sample(frac=0.7, random_state=120307)
testdev_df = out_df.drop(train_df.index)
test_df = testdev_df.sample(frac=0.66, random_state=703021)
dev_df = testdev_df.drop(test_df.index)
train_df.to_csv(
BINARY_PATH + os.sep + "train.csv",
header=True,
index=False,
sep="\t",
encoding="utf8",
decimal=".",
)
test_df.to_csv(
BINARY_PATH + os.sep + "test.csv",
header=True,
index=False,
sep="\t",
encoding="utf8",
decimal=".",
)
dev_df.to_csv(
BINARY_PATH + os.sep + "dev.csv",
header=True,
index=False,
sep="\t",
encoding="utf8",
decimal=".",
)
print("Done.")
sentences_map = {}
for drug_name in sentence_df.drug_id.unique():
sentences_map[drug_name] = {}
for _, row in sentence_df.iterrows():
sentences_map[row.drug_id][row.sentence_index] = mine.Sentence(row.sentences)
sheet_names = ["ADR", "WD", "SSI", "DI"]
invalid_sentences = set()
total_annos = 0
total_warns = 0
for sheet in sheet_names:
    labels_df = pd.read_excel(XLSX_PATH, sheet_name=sheet + "_Identified")
import numpy as np
import pandas as pd
def set_order(df, row):
if pd.isnull(row['order']):
if pd.notnull(row['family']):
row['order'] = df[(pd.notnull(df['order']) &
df['family']== row['family'])]['order'].head(1)
elif pd.notnull(row['genus']):
            row['order'] = df[(pd.notnull(df['order']) &
                               df['genus'] == row['genus'])]['order'].head(1)
    return row
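# Usage sketch (assumes a taxonomy DataFrame with 'order', 'family' and 'genus' columns, as
# referenced above): set_order() is intended to be applied row-wise to back-fill missing orders.
def _example_fill_missing_order(df):
    """A minimal sketch of applying set_order() across a taxonomy DataFrame."""
    return df.apply(lambda row: set_order(df, row), axis=1)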
# Copyright (c) 2017, Intel Research and Development Ireland Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__authors__ = '<NAME>, <NAME>'
__copyright__ = "Copyright (c) 2017, Intel Research and Development Ireland Ltd."
__license__ = "Apache 2.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
from analytics_engine.heuristics.beans.infograph import InfoGraphNode, InfoGraphNodeType, \
InfoGraphNodeCategory, InfoGraphNodeLayer
import pandas
import math
class Fingerprint(object):
# Deprecated
@staticmethod
def _node_is_nic_on_management_net(node, graph, mng_net_name):
node_name = InfoGraphNode.get_name(node)
node_type = InfoGraphNode.get_type(node)
if node_type == InfoGraphNodeType.VIRTUAL_NIC or \
node_type == InfoGraphNodeType.VIRTUAL_NIC_2:
neighs = graph.neighbors(node_name)
for n in neighs:
neighbor = InfoGraphNode.\
get_node(graph, n)
if InfoGraphNode.get_type(neighbor) == \
InfoGraphNodeType.VIRTUAL_NETWORK:
network_name = \
InfoGraphNode.get_attributes(
neighbor)['name']
if network_name == mng_net_name:
return True
return False
@staticmethod
def workload_capacity_usage(annotated_subgraph):
"""
This is a type of fingerprint
"""
# TODO: Validate graph
categories = list()
categories.append(InfoGraphNodeCategory.COMPUTE)
categories.append(InfoGraphNodeCategory.NETWORK)
# TODO: Add a Volume to the workloads to get HD usage
categories.append(InfoGraphNodeCategory.STORAGE)
# TODO: Get telemetry for Memory
categories.append(InfoGraphNodeCategory.MEMORY)
fingerprint = dict()
counter = dict()
for category in categories:
fingerprint[category] = 0
counter[category] = 0
# calculation of the fingerprint on top of the virtual resources
local_subgraph = annotated_subgraph.copy()
local_subgraph.filter_nodes('layer', "physical")
local_subgraph.filter_nodes('layer', "service")
for node in local_subgraph.nodes(data=True):
# if Fingerprint._node_is_nic_on_management_net(
# node, annotated_subgraph, mng_net_name):
# continue
category = InfoGraphNode.get_category(node)
utilization = InfoGraphNode.get_utilization(node)
if 'utilization' in utilization.columns.values:
mean = utilization['utilization'].mean()
fingerprint[category] += mean
counter[category] += 1
# This is just an average
# TODO: Improve the average
for category in categories:
if counter[category] > 0:
fingerprint[category] = \
fingerprint[category] / counter[category]
return fingerprint
@staticmethod
def machine_capacity_usage(annotated_subgraph):
"""
This is a type of fingerprint from the infrastructure perspective
"""
# TODO: Validate graph
categories = list()
categories.append(InfoGraphNodeCategory.COMPUTE)
categories.append(InfoGraphNodeCategory.NETWORK)
# TODO: Add a Volume to the workloads to get HD usage
categories.append(InfoGraphNodeCategory.STORAGE)
# TODO: Get telemetry for Memory
categories.append(InfoGraphNodeCategory.MEMORY)
fingerprint = dict()
counter = dict()
for category in categories:
fingerprint[category] = 0
counter[category] = 0
# calculation of the fingerprint on top of the virtual resources
local_subgraph = annotated_subgraph.copy()
local_subgraph.filter_nodes('layer', "virtual")
local_subgraph.filter_nodes('layer', "service")
local_subgraph.filter_nodes('type', 'machine')
for node in local_subgraph.nodes(data=True):
# if Fingerprint._node_is_nic_on_management_net(
# node, annotated_subgraph, mng_net_name):
# continue
name = InfoGraphNode.get_name(node)
category = InfoGraphNode.get_category(node)
utilization = InfoGraphNode.get_utilization(node)
if 'utilization' in utilization.columns.values:
# LOG.info("NODE: {} - CATEGORY: {}".format(name, category))
mean = utilization['utilization'].mean()
fingerprint[category] += mean
counter[category] += 1
# This is just an average
# TODO: Improve the average
for category in categories:
if counter[category] > 0:
fingerprint[category] = \
fingerprint[category] / counter[category]
return fingerprint
@staticmethod
def compute_node(annotated_subgraph, hostname=None):
"""
This is a type of fingerprint from the infrastructure perspective
"""
# TODO: Validate graph
data = dict()
statistics = dict()
compute = InfoGraphNodeCategory.COMPUTE
data[compute] = pandas.DataFrame()
statistics[compute] = {'mean': 0, 'median': 0, 'min': 0, 'max': 0, 'var': 0, 'std_dev': 0}
network = InfoGraphNodeCategory.NETWORK
data[network] = pandas.DataFrame()
statistics[network] = {'mean': 0, 'median': 0, 'min': 0, 'max': 0, 'var': 0, 'std_dev': 0}
storage = InfoGraphNodeCategory.STORAGE
data[storage] = pandas.DataFrame()
statistics[storage] = {'mean': 0, 'median': 0, 'min': 0, 'max': 0, 'var': 0, 'std_dev': 0}
memory = InfoGraphNodeCategory.MEMORY
data[memory] = pandas.DataFrame()
statistics[memory] = {'mean': 0, 'median': 0, 'min': 0, 'max': 0, 'var': 0, 'std_dev': 0}
# Calculation of the fingerprint on top of the virtual resources
local_subgraph = annotated_subgraph.copy()
for node in local_subgraph.nodes(data=True):
layer = InfoGraphNode.get_layer(node)
is_machine = InfoGraphNode.node_is_machine(node)
if is_machine:
continue
if layer == InfoGraphNodeLayer.VIRTUAL:
continue
if layer == InfoGraphNodeLayer.SERVICE:
continue
# If hostname has been specified, need to take into account only
# nodes that are related to the specific host
attrs = InfoGraphNode.get_attributes(node)
allocation = attrs['allocation'] if 'allocation' in attrs \
else None
if hostname and not hostname == allocation:
continue
category = InfoGraphNode.get_category(node)
utilization = InfoGraphNode.get_utilization(node)
try:
utilization = utilization.drop('timestamp', 1)
except ValueError:
utilization = InfoGraphNode.get_utilization(node)
data[category] = pandas.concat([data[category], utilization])
for category in statistics:
if not data[category].empty:
mean = data[category]['utilization'].mean()
median = (data[category]['utilization']).median()
min = data[category]['utilization'].min()
maximum = data[category]['utilization'].max()
var = data[category]['utilization'].var()
std_dev = math.sqrt(var)
else:
mean = 0
median = 0
min = 0
maximum = 0
var = 0
std_dev = 0
statistics[category] = \
{'mean': mean,
'median': median,
'min': min,
'max': maximum,
'var': var,
'std_dev': std_dev}
return [data, statistics]
@staticmethod
def compute_node_resources(annotated_subgraph, hostname=None):
"""
This is a type of fingerprint from the infrastructure perspective
"""
# TODO: Validate graph
data = dict()
statistics = dict()
# Calculation of the fingerprint on top of the virtual resources
local_subgraph = annotated_subgraph.copy()
for node in local_subgraph.nodes(data=True):
layer = InfoGraphNode.get_layer(node)
if layer == InfoGraphNodeLayer.VIRTUAL:
continue
if layer == InfoGraphNodeLayer.SERVICE:
continue
type = InfoGraphNode.get_type(node)
if type == 'core':
continue
# If hostname has been specified, need to take into account only
# nodes that are related to the specific host
attrs = InfoGraphNode.get_attributes(node)
allocation = attrs['allocation'] if 'allocation' in attrs \
else None
if hostname and not hostname == allocation:
continue
name = InfoGraphNode.get_name(node)
statistics[name] = {'mean': 0,
'median': 0,
'min': 0,
'max': 0,
'var': 0,
'std_dev': 0}
utilization = InfoGraphNode.get_utilization(node)
try:
utilization = utilization.drop('timestamp', 1)
except ValueError:
utilization = InfoGraphNode.get_utilization(node)
data[name] = utilization
if not data[name].empty:
mean = data[name]['utilization'].mean()
median = (data[name]['utilization']).median()
min = data[name]['utilization'].min()
maximum = data[name]['utilization'].max()
var = data[name]['utilization'].var()
std_dev = math.sqrt(var)
else:
mean = 0
median = 0
min = 0
maximum = 0
var = 0
std_dev = 0
statistics[name] = \
{'mean': mean,
'median': median,
'min': min,
'max': maximum,
'var': var,
'std_dev': std_dev}
return [data, statistics]
@staticmethod
def workload(nodes):
"""
This is a type of fingerprint from the infrastructure perspective
"""
# TODO: Validate graph
data = dict()
statistics = dict()
compute = InfoGraphNodeCategory.COMPUTE
data[compute] = pandas.DataFrame()
statistics[compute] = {'mean': 0, 'median': 0, 'min': 0, 'max': 0, 'var': 0, 'std_dev': 0}
network = InfoGraphNodeCategory.NETWORK
data[network] = pandas.DataFrame()
statistics[network] = {'mean': 0, 'median': 0, 'min': 0, 'max': 0, 'var': 0, 'std_dev': 0}
storage = InfoGraphNodeCategory.STORAGE
data[storage] = pandas.DataFrame()
statistics[storage] = {'mean': 0, 'median': 0, 'min': 0, 'max': 0, 'var': 0, 'std_dev': 0}
memory = InfoGraphNodeCategory.MEMORY
        data[memory] = pandas.DataFrame()
import numpy as np
import pandas as pd
import os, errno
import datetime
import uuid
import itertools
import yaml
import subprocess
import scipy.sparse as sp
from scipy.spatial.distance import squareform
from sklearn.decomposition.nmf import non_negative_factorization
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
from sklearn.utils import sparsefuncs
from fastcluster import linkage
from scipy.cluster.hierarchy import leaves_list
import matplotlib.pyplot as plt
import scanpy as sc
def save_df_to_npz(obj, filename):
np.savez_compressed(filename, data=obj.values, index=obj.index.values, columns=obj.columns.values)
def save_df_to_text(obj, filename):
obj.to_csv(filename, sep='\t')
def load_df_from_npz(filename):
with np.load(filename, allow_pickle=True) as f:
obj = pd.DataFrame(**f)
return obj
def check_dir_exists(path):
"""
Checks if directory already exists or not and creates it if it doesn't
"""
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def worker_filter(iterable, worker_index, total_workers):
return (p for i,p in enumerate(iterable) if (i-worker_index)%total_workers==0)
def fast_euclidean(mat):
D = mat.dot(mat.T)
squared_norms = np.diag(D).copy()
D *= -2.0
D += squared_norms.reshape((-1,1))
D += squared_norms.reshape((1,-1))
D = np.sqrt(D)
D[D < 0] = 0
return squareform(D, checks=False)
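# Sanity-check sketch: fast_euclidean() should agree, up to floating-point error, with SciPy's
# condensed pairwise euclidean distances (pdist is imported locally just for this comparison).
def _check_fast_euclidean(n_rows=50, n_cols=10, seed=0):
    from scipy.spatial.distance import pdist
    rng = np.random.RandomState(seed)
    mat = rng.rand(n_rows, n_cols)
    return np.allclose(fast_euclidean(mat), pdist(mat, metric='euclidean'))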
def fast_ols_all_cols(X, Y):
pinv = np.linalg.pinv(X)
beta = np.dot(pinv, Y)
return(beta)
def fast_ols_all_cols_df(X,Y):
beta = fast_ols_all_cols(X, Y)
beta = pd.DataFrame(beta, index=X.columns, columns=Y.columns)
return(beta)
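# Sanity-check sketch (toy random data, hypothetical column names): the pseudo-inverse betas from
# fast_ols_all_cols_df() should match numpy's least-squares solution when X has full column rank.
def _check_fast_ols(n_obs=100, seed=1):
    rng = np.random.RandomState(seed)
    X = pd.DataFrame(rng.rand(n_obs, 3), columns=['x1', 'x2', 'x3'])
    Y = pd.DataFrame(rng.rand(n_obs, 2), columns=['y1', 'y2'])
    beta = fast_ols_all_cols_df(X, Y)
    beta_ref = np.linalg.lstsq(X.values, Y.values, rcond=None)[0]
    return np.allclose(beta.values, beta_ref)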
def var_sparse_matrix(X):
mean = np.array(X.mean(axis=0)).reshape(-1)
Xcopy = X.copy()
Xcopy.data **= 2
var = np.array(Xcopy.mean(axis=0)).reshape(-1) - (mean**2)
return(var)
def get_highvar_genes_sparse(expression, expected_fano_threshold=None,
minimal_mean=0.01, numgenes=None):
# Find high variance genes within those cells
gene_mean = np.array(expression.mean(axis=0)).astype(float).reshape(-1)
E2 = expression.copy(); E2.data **= 2; gene2_mean = np.array(E2.mean(axis=0)).reshape(-1)
gene_var = pd.Series(gene2_mean - (gene_mean**2))
del(E2)
gene_mean = pd.Series(gene_mean)
gene_fano = gene_var / gene_mean
# Find parameters for expected fano line
top_genes = gene_mean.sort_values(ascending=False)[:20].index
A = (np.sqrt(gene_var)/gene_mean)[top_genes].min()
w_mean_low, w_mean_high = gene_mean.quantile([0.10, 0.90])
w_fano_low, w_fano_high = gene_fano.quantile([0.10, 0.90])
winsor_box = ((gene_fano > w_fano_low) &
(gene_fano < w_fano_high) &
(gene_mean > w_mean_low) &
(gene_mean < w_mean_high))
fano_median = gene_fano[winsor_box].median()
B = np.sqrt(fano_median)
gene_expected_fano = (A**2)*gene_mean + (B**2)
fano_ratio = (gene_fano/gene_expected_fano)
# Identify high var genes
if numgenes is not None:
highvargenes = fano_ratio.sort_values(ascending=False).index[:numgenes]
high_var_genes_ind = fano_ratio.index.isin(highvargenes)
T=None
else:
if not expected_fano_threshold:
            T = (1. + gene_fano[winsor_box].std())
        else:
            T = expected_fano_threshold
        high_var_genes_ind = (fano_ratio > T) & (gene_mean > minimal_mean)
gene_counts_stats = pd.DataFrame({
'mean': gene_mean,
'var': gene_var,
'fano': gene_fano,
'expected_fano': gene_expected_fano,
'high_var': high_var_genes_ind,
'fano_ratio': fano_ratio
})
gene_fano_parameters = {
'A': A, 'B': B, 'T':T, 'minimal_mean': minimal_mean,
}
return(gene_counts_stats, gene_fano_parameters)
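# Usage sketch (toy random counts; the cell/gene sizes are arbitrary): select the 200 most
# variable genes from a sparse cells-by-genes counts matrix.
def _example_highvar_genes_sparse(n_cells=500, n_genes=1000, numgenes=200, seed=0):
    rng = np.random.RandomState(seed)
    counts = sp.csr_matrix(rng.poisson(0.5, size=(n_cells, n_genes)).astype(float))
    gene_stats, fano_params = get_highvar_genes_sparse(counts, numgenes=numgenes)
    return np.where(gene_stats['high_var'].values)[0]   # indices of the selected genes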
def get_highvar_genes(input_counts, expected_fano_threshold=None,
minimal_mean=0.01, numgenes=None):
# Find high variance genes within those cells
gene_counts_mean = pd.Series(input_counts.mean(axis=0).astype(float))
gene_counts_var = pd.Series(input_counts.var(ddof=0, axis=0).astype(float))
    gene_counts_fano = pd.Series(gene_counts_var/gene_counts_mean)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import warnings
import calendar
import seaborn as sns
sns.set(style='white', palette='deep')
plt.style.use('grayscale')
warnings.filterwarnings('ignore')
width = 0.35
# Functions
def autolabel(rects,ax, df): #autolabel
for rect in rects:
height = rect.get_height()
ax.annotate('{} ({:.2f}%)'.format(height, height*100/df.shape[0]),
xy = (rect.get_x() + rect.get_width()/2, height),
xytext= (0,3),
textcoords="offset points",
ha='center', va='bottom', fontsize=15)
def autolabel_without_pct(rects,ax): #autolabel
for rect in rects:
height = rect.get_height()
ax.annotate('{}'.format(height),
xy = (rect.get_x() + rect.get_width()/2, height),
xytext= (0,3),
textcoords="offset points",
ha='center', va='bottom', fontsize=15)
def autolabel_horizontal(rects,ax):
"""
Attach a text label above each bar displaying its height
"""
for rect in rects:
width = rect.get_width()
ax.text(rect.get_x() + rect.get_width()+3, rect.get_y() + rect.get_height()/2.,
'%.2f' % width,
ha='center', va='center', color='black', fontsize=15)
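# Usage sketch (toy data): autolabel() annotates each bar with its height plus that height as a
# percentage of the number of rows in the DataFrame passed in (10 rows below, so height*100/10).
def _example_autolabel():
    toy = pd.DataFrame({'value': range(10)})
    fig, ax = plt.subplots(figsize=(6, 4))
    rects = ax.bar(['buy', 'sell'], [4, 6], width)
    autolabel(rects, ax, toy)
    return fig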
# Importing the data file
df = pd.read_excel('Banco de Dados - WDO.xlsx')
# Checking for null values
df.isnull().sum()
null_values = (df.isnull().sum()/len(df)*100)
null_values = pd.DataFrame(null_values, columns= ['% Null Values'])
null_values
# Dropping null values
df_feature = df.copy()
df_feature.dropna(inplace=True)
df_feature.isnull().sum()
# Renaming columns
bank = ['corretora_', 'bank_']
letters = ['<KEY>']
new_columns = np.array([])
for i in bank:
for j in range(0,4):
new_columns = np.append(new_columns, i+list(letters[0])[j])
df_feature.columns
count = 0
for i in df_feature.loc[:, ['win_xp_(5m)', 'win_rico_(5m)', 'win_clear_(5m)',
'win_modal_(5m)', 'win_ubs_(5m)', 'win_btg_(5m)', 'win_bradesco_(5m)',
'win_genial(5m)']]:
df_feature.rename(columns={i:new_columns[count]}, inplace=True)
count+=1
# Checking for data-entry (typing) errors
df_feature.columns
df_feature.set_index('data', inplace=True)
max_value = np.array([])
min_value = np.array([])
max_index = np.array([])
min_index = np.array([])
max_time = np.array([])
min_time = np.array([])
count = 0
value_error_final = pd.DataFrame()
for i in df_feature.loc[:,['abertura', 'maxima', 'minima',
'fechamento', '20mma_maxima_2m', '20mma_minima_2m', '9mme_fechamento',
'200mma_fechamento', '20mma_maxima_5m', '20mma_minima_5m',
'volume_financeiro','corretora_a', 'corretora_b', 'corretora_c',
'corretora_d', 'bank_a', 'bank_b', 'bank_c', 'bank_d','gain', 'quantas_correcoes',
'quantos_pontos_avancou', 'quantos_pontos_retornados']]:
max_value = np.append(max_value,df_feature[i].max())
min_value = np.append(min_value,df_feature[i].min())
max_index = np.append(max_index,df_feature.loc[:,i].idxmax())
min_index = np.append(min_index,df_feature.loc[:,i].idxmin())
max_time = np.append(max_time,df_feature[df_feature[i] == df_feature[i].max()]['horario'])
min_time = np.append(min_time,df_feature[df_feature[i] == df_feature[i].min()]['horario'])
    print('The maximum value for column |{}| was {}, on {} at {}'.format(i,max_value[count],
          max_index[count],max_time[count]))
    print('The minimum value for column |{}| was {}, on {} at {}'.format(i,min_value[count],
          min_index[count], min_time[count]))
print('*'*100)
valer_error = pd.DataFrame({'valor_max':[max_value[count]],
'dia_max': [max_index[count]],
'horario_max': [max_time[count]],
'valor_min':[min_value[count]],
'dia_min': [min_index[count]],
'horario_min': [min_time[count]]}, index=[i])
value_error_final = pd.concat([valer_error,value_error_final])
count+=1
df_feature = df_feature.drop('gain', axis=1)
# The candle range (high minus low) exposes data-entry errors in the |maxima| and |minima| columns
df_feature['amplitude'] = df_feature['maxima']-df_feature['minima'] # Creating the range (amplitude) column
amplitude_error = df_feature[df_feature['amplitude'] <0][['maxima', 'minima', 'horario']]
# Checking whether the 9 EMA is below the 20 MAs used for trade activation
nove_compra_error = df_feature[df_feature['tipo_de_negociacao']=='compra'][['20mma_maxima_2m', '9mme_fechamento'
, 'horario']]
nove_venda_error = df_feature[df_feature['tipo_de_negociacao']=='venda'][['20mma_minima_2m', '9mme_fechamento'
, 'horario']]
nove_compra_error['error'] = nove_compra_error['9mme_fechamento']-nove_compra_error['20mma_maxima_2m']
nove_venda_error['error'] = nove_venda_error['9mme_fechamento']-nove_venda_error['20mma_minima_2m']
nove_compra_error = nove_compra_error[nove_compra_error['error'].values<0]
nove_venda_error = nove_venda_error[nove_venda_error['error'].values>0]
nove_compra_error[['20mma_maxima_2m', '9mme_fechamento']] = nove_compra_error[['9mme_fechamento','20mma_maxima_2m']].where(nove_compra_error['error']<0,
nove_compra_error[['20mma_maxima_2m', '9mme_fechamento']].values)
nove_venda_error[['20mma_minima_2m','9mme_fechamento']] = nove_venda_error[['9mme_fechamento','20mma_minima_2m']].where(nove_venda_error['error']>0,
nove_venda_error[['20mma_minima_2m','9mme_fechamento']].values)
df_feature.groupby(df_feature.index)['horario'].get_group('2019-06-03 ')[0]
df_feature.groupby(df_feature.index)['horario'].get_group('2019-06-19 ')[0]
df_feature.groupby(df_feature.index)['horario'].value_counts()
for i in range(0, len(nove_compra_error)):
df_feature.loc[(df_feature.index == nove_compra_error.index[i]) & (df_feature['horario']==nove_compra_error['horario'][i]), '20mma_maxima_2m'] = nove_compra_error['20mma_maxima_2m'].values[i]
df_feature.loc[(df_feature.index == nove_compra_error.index[i]) & (df_feature['horario']==nove_compra_error['horario'][i]), '9mme_fechamento'] = nove_compra_error['9mme_fechamento'].values[i]
for i in range(0, len(nove_venda_error)):
df_feature.loc[(df_feature.index == nove_venda_error.index[i]) & (df_feature['horario']==nove_venda_error['horario'][i]), '20mma_minima_2m'] = nove_venda_error['20mma_minima_2m'].values[i]
df_feature.loc[(df_feature.index == nove_venda_error.index[i]) & (df_feature['horario']==nove_venda_error['horario'][i]), '9mme_fechamento'] = nove_venda_error['9mme_fechamento'].values[i]
nove_venda_error['20mma_minima_2m'][1]
df_feature.loc[(df_feature.index == nove_venda_error.index[1]) & (df_feature['horario']==nove_venda_error['horario'][1]), '20mma_minima_2m']
# Checking whether the 2m 20 MA high diverges from the 2m 20 MA low
df_feature.columns
m20_error_high_2m = df_feature[df_feature['20mma_maxima_2m']<df_feature['20mma_minima_2m']][['20mma_maxima_2m', '20mma_minima_2m', 'horario']]
m20_error_high_2m['error'] = m20_error_high_2m['20mma_maxima_2m']-m20_error_high_2m['20mma_minima_2m']
m20_error_high_2m[['20mma_maxima_2m', '20mma_minima_2m']] = m20_error_high_2m[['20mma_minima_2m', '20mma_maxima_2m']].where(m20_error_high_2m['error']<0,
m20_error_high_2m[['20mma_maxima_2m', '20mma_minima_2m']].values)
for i in range(0, len(m20_error_high_2m)):
df_feature.loc[(df_feature.index == m20_error_high_2m.index[i]) & (df_feature['horario']==m20_error_high_2m['horario'][i]), '20mma_maxima_2m'] = m20_error_high_2m['20mma_maxima_2m'].values[i]
df_feature.loc[(df_feature.index == m20_error_high_2m.index[i]) & (df_feature['horario']==m20_error_high_2m['horario'][i]), '20mma_minima_2m'] = m20_error_high_2m['20mma_minima_2m'].values[i]
# Checking whether the 5m 20 MA high diverges from the 5m 20 MA low
df_feature.columns
m20_error_high_5m = df_feature[df_feature['20mma_maxima_5m']<df_feature['20mma_minima_5m']][['20mma_maxima_5m', '20mma_minima_5m', 'horario']]
m20_error_high_5m['error'] = m20_error_high_5m['20mma_maxima_5m']-m20_error_high_5m['20mma_minima_5m']
m20_error_high_5m[['20mma_maxima_5m', '20mma_minima_5m']] = m20_error_high_5m[['20mma_minima_5m', '20mma_maxima_5m']].where(m20_error_high_5m['error']<0,
m20_error_high_5m[['20mma_maxima_5m', '20mma_minima_5m']].values)
for i in range(0, len(m20_error_high_5m)):
df_feature.loc[(df_feature.index == m20_error_high_5m.index[i]) & (df_feature['horario']==m20_error_high_5m['horario'][i]), '20mma_maxima_5m'] = m20_error_high_5m['20mma_maxima_5m'].values[i]
df_feature.loc[(df_feature.index == m20_error_high_5m.index[i]) & (df_feature['horario']==m20_error_high_5m['horario'][i]), '20mma_minima_5m'] = m20_error_high_5m['20mma_minima_5m'].values[i]
# Saving the cleaned spreadsheet
df_feature.to_excel('WDO Tratado.xlsx')
# Which trade types occurred most frequently? PLOT
df_feature.columns
df_compra = df_feature[df_feature['tipo_de_negociacao']=='compra']['tipo_de_negociacao']
df_venda = df_feature[df_feature['tipo_de_negociacao']=='venda']['tipo_de_negociacao']
labels = [df_compra.values[0],df_venda.values[0]]
ind = np.arange(len(labels))
values = [len(df_compra), len(df_venda)]
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(1,1,1)
ax.set_title('Quantidade de Operações de Compra e Venda \n Realizado pela Estratégia de negociação', fontsize=15)
ax.set_xlabel('Tipo de Operação', fontsize=15)
ax.set_ylabel('Quantidade de Operações Realizadas', fontsize=15)
ax.set_xticklabels(['Compra', 'Venda'], fontsize=15)
ax.set_yticklabels(np.arange(0,501,100), fontsize=15)
rects1= ax.bar('Compra', len(df_compra), width, edgecolor='black')
rects2=ax.bar('Venda', len(df_venda), width, edgecolor='black')
ax.set_xticks(ind)
autolabel(rects1,ax,df_feature)
autolabel(rects2,ax,df_feature)
plt.tight_layout()
# Which days of the week had the most trades? PLOT
df_feature.columns
df_compra = df_feature[df_feature['tipo_de_negociacao']=='compra'][['horario','tipo_de_negociacao']]
df_venda = df_feature[df_feature['tipo_de_negociacao']=='venda'][['horario','tipo_de_negociacao']]
df_compra['data'] = df_compra.index
df_compra['dia'] = df_compra['data'].apply(lambda x: x.weekday())
df_compra['mes'] = df_compra['data'].apply(lambda x: x.month)
df_compra['hora'] = df_compra['horario'].apply(lambda x: x.hour)
df_venda['data'] = df_venda.index
df_venda['dia'] = df_venda['data'].apply(lambda x: x.weekday())
df_venda['mes'] = df_venda['data'].apply(lambda x: x.month)
df_venda['hora'] = df_venda['horario'].apply(lambda x: x.hour)
dias = {}
for i,v in enumerate(list(calendar.day_name)):
dias[i]=v
meses = {}
for i,v in enumerate(list(calendar.month_name)[1:],1):
meses[i]=v
dias_nomes_compra = np.array([])
for i in df_compra['dia']:
for j in range(0,len(dias)):
if i == list(dias.keys())[j]:
dias_nomes_compra = np.append(dias_nomes_compra,dias[j])
dias_nomes_venda = np.array([])
for i in df_venda['dia']:
for j in range(0,len(dias)):
if i == list(dias.keys())[j]:
dias_nomes_venda = np.append(dias_nomes_venda,dias[j])
def compra_venda(x):
for i in range(6,len(meses)+1):
if x == i:
return meses[x]
df_compra['dia'] = dias_nomes_compra
df_venda['dia'] = dias_nomes_venda
df_compra['mes'] = df_compra['mes'].apply(compra_venda )
df_venda['mes'] = df_venda['mes'].apply(compra_venda )
fig = plt.figure(figsize=(15,15))
ax = fig.add_subplot(1,1,1)
labels = np.array([])
for i in range(0,5):
labels = np.append(labels,dias[i])
len_dia_compra = np.array([])
len_dia_venda = np.array([])
for i in labels:
len_dia_compra = np.append(len_dia_compra, len(df_compra[df_compra['dia']==i]))
len_dia_venda = np.append(len_dia_venda, len(df_venda[df_venda['dia']==i]))
ind = np.arange(len(labels))
ax.set_title('Tabela de Operações por Dias', fontsize=15)
ax.set_xticks(ind)
ax.set_xticklabels(['Segunda', 'Terça', 'Quarta', 'Quinta', 'Sexta'], fontsize=15)
ax.set_xlabel('Dias da semana', fontsize=15)
ax.set_yticklabels(np.arange(0,150,20),fontsize=15)
ax.set_ylabel('Quantidade de Operações por Dias', fontsize=15)
for i in range(0,len(labels)):
rects1 = ax.bar(ind+width/2, len_dia_compra, width=width, edgecolor='black')
rects2 = ax.bar(ind-width/2, len_dia_venda, width=width, edgecolor='black')
ax.legend(['compra','venda' ], fontsize=15, loc='best')
autolabel_without_pct(rects1,ax)
autolabel_without_pct(rects2,ax)
plt.tight_layout()
# Which months had the most trades? PLOT
labels = []
len_mes_compra = []
len_mes_venda = []
[labels.append(meses[i]) for i in range(6,13)]
[len_mes_compra.append(len(df_compra[df_compra['mes']==i])) for i in labels]
[len_mes_venda.append(len(df_venda[df_venda['mes']==i])) for i in labels]
ind=np.arange(len(labels))
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(1,1,1)
ax.set_title('Tabela de Operações por Mês', fontsize=15)
ax.set_xticks(ind)
ax.set_xticklabels(['Junho', 'Julho', 'Agosto', 'Setembro', 'Outubro', 'Novembro', 'Dezembro'], fontsize=15)
ax.set_yticklabels(np.arange(0,110,10), fontsize=15)
ax.set_ylabel('Quantidade de Operações por Mês', fontsize=15)
for i in range(0,len(len_mes_compra)):
rects1= ax.bar(ind+width/2, len_mes_compra, width=width, edgecolor='black')
rects2= ax.bar(ind-width/2, len_mes_venda, width=width, edgecolor='black')
ax.legend(['compra','venda' ], fontsize=15, loc='best')
autolabel_without_pct(rects1,ax,)
autolabel_without_pct(rects2,ax,)
plt.tight_layout()
# Which times of day produced the most signals? PLOT
bins = np.arange(9,18)
time = list(np.arange(9,18))
time_string = [str(time[i]) for i in range(0,len(time))]
len_time_compra = list(df_compra.groupby( | pd.cut(df_compra['hora'], bins) | pandas.cut |
from strategy.rebalance import get_relative_to_expiry_rebalance_dates, \
get_fixed_frequency_rebalance_dates, \
get_relative_to_expiry_instrument_weights
from strategy.calendar import get_mtm_dates
import pandas as pd
import pytest
from pandas.util.testing import assert_index_equal, assert_frame_equal
def assert_dict_of_frames(dict1, dict2):
assert dict1.keys() == dict2.keys()
for key in dict1:
assert_frame_equal(dict1[key], dict2[key], check_names=False)
def test_tradeables_dates():
    # no CME holidays in this date range
sd = pd.Timestamp("2015-01-02")
ed = pd.Timestamp("2015-03-23")
exchanges = ["CME"]
tradeable_dates = get_mtm_dates(sd, ed, exchanges)
exp_tradeable_dates = pd.date_range(
"2015-01-02", "2015-03-23", freq="B"
)
assert_index_equal(tradeable_dates, exp_tradeable_dates)
# with an adhoc holiday
holidays = [pd.Timestamp("2015-01-02")]
tradeable_dates = get_mtm_dates(sd, ed, exchanges, holidays=holidays)
exp_tradeable_dates = pd.date_range(
"2015-01-03", "2015-03-23", freq="B"
)
assert_index_equal(tradeable_dates, exp_tradeable_dates)
# with CME holiday (New Years day)
sd = pd.Timestamp("2015-01-01")
ed = pd.Timestamp("2015-01-02")
tradeable_dates = get_mtm_dates(sd, ed, exchanges)
exp_tradeable_dates = pd.DatetimeIndex([pd.Timestamp("2015-01-02")])
assert_index_equal(tradeable_dates, exp_tradeable_dates)
def test_relative_to_expiry_rebalance_dates():
# each contract rolling individually, same offset
# change to ES and TY
sd = pd.Timestamp("2015-01-02")
ed = pd.Timestamp("2015-03-23")
expiries = pd.DataFrame(
[["2015ESH", "2015-03-20", "2015-03-20"],
["2015ESM", "2015-06-19", "2015-06-19"],
["2015TYH", "2015-02-27", "2015-03-20"],
["2015TYM", "2015-05-29", "2015-06-19"]],
columns=["contract", "first_notice", "last_trade"]
)
offsets = -3
rebal_dates = get_relative_to_expiry_rebalance_dates(
sd, ed, expiries, offsets, all_monthly=False, holidays=None
)
exp_rebal_dates = pd.DatetimeIndex(
["2015-01-02", "2015-02-24", "2015-03-17"]
)
assert_index_equal(rebal_dates, exp_rebal_dates)
# rolling all monthly contracts together, same offset
rebal_dates = get_relative_to_expiry_rebalance_dates(
sd, ed, expiries, offsets, all_monthly=True, holidays=None
)
exp_rebal_dates = pd.DatetimeIndex(["2015-01-02", "2015-02-24"])
assert_index_equal(rebal_dates, exp_rebal_dates)
# rolling each contract individually, different offset
offsets = {"ES": -3, "TY": -4}
rebal_dates = get_relative_to_expiry_rebalance_dates(
sd, ed, expiries, offsets, all_monthly=False, holidays=None
)
exp_rebal_dates = pd.DatetimeIndex(
["2015-01-02", "2015-02-23", "2015-03-17"]
)
assert_index_equal(rebal_dates, exp_rebal_dates)
def test_relative_to_expiry_weights():
expiries = pd.DataFrame(
[["2015ESH", "2015-03-20", "2015-03-20"],
["2015ESM", "2015-06-19", "2015-06-19"],
["2015ESU", "2015-09-18", "2015-09-18"],
["2015TYH", "2015-03-16", "2015-03-20"],
["2015TYM", "2015-05-29", "2015-06-19"],
["2015TYU", "2015-08-31", "2015-09-21"]],
columns=["contract", "first_notice", "last_trade"]
)
# one generic and one product
dts = pd.date_range("2015-03-17", "2015-03-18", freq="B")
offsets = -3
root_gnrcs = {"ES": ["ES1"]}
wts = get_relative_to_expiry_instrument_weights(
dts, root_gnrcs, expiries, offsets
)
exp_wts = {
"ES": pd.DataFrame(
[1.0, 1.0],
index=pd.MultiIndex.from_tuples(
[(pd.Timestamp("2015-03-17"), "2015ESH"),
(pd.Timestamp("2015-03-18"), "2015ESM")],
names=("date", "contract")),
columns=["ES1"]
)
}
assert_dict_of_frames(wts, exp_wts)
# multiple products
dts = pd.date_range("2015-03-13", "2015-03-20", freq="B")
offsets = -1
root_gnrcs = {"ES": ["ES1"], "TY": ["TY1"]}
wts = get_relative_to_expiry_instrument_weights(
dts, root_gnrcs, expiries, offsets
)
exp_wts = {
"ES": pd.DataFrame([1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
index=pd.MultiIndex.from_tuples(
[(pd.Timestamp("2015-03-13"), "2015ESH"),
(pd.Timestamp("2015-03-16"), "2015ESH"),
( | pd.Timestamp("2015-03-17") | pandas.Timestamp |
####################################################################################################
# EXPERIMENT TRACKING ROUTINES
####################################################################################################
import numpy as np
import imageio
from matplotlib import pyplot as plt
from matplotlib import animation
from matplotlib import image as mpimg
import os, sys, json
import glob
import pickle
import pandas as pd
import itertools as it
from time import strftime, localtime, time, perf_counter
################### Global Parameters
EXPERIMENT_PATH = r'/experiments' # path where new experiments will be created
################### create_experiment
def create_experiment(process_run, params):
    '''
    Create experiment by processing one run per combination of the list-valued entries in params
    param: process_run func called once per run as process_run(run_folder, params); returns a dict of metrics
    param: params dict run parameters; scalar values are shared by every run, while any value given
                            as a list is treated as a hyper-parameter and swept over all combinations.
                            Optional keys: 'exper_tag' (str), 'verbose' (bool/int), 'filelog' (bool/int)
    return: str exper_path returns the experiment path
    For example...
        from util_experiment import create_experiment
        ...
        def my_process_run(run_folder, params):
            data_shape = params['data_shape'] # will be 32 on first run, 64 on second...
            ...
            return run_summary # dict of metrics
        my_params = {'exper_tag': 'my_exp_name', 'data_shape': [32, 64, 128], ...} # one run per combination
        create_experiment(my_process_run, my_params)
    '''
    # set base path to experiments folder (needs lots of space)
# set params variables for all experiment runs
exper_tag = params['exper_tag'] if 'exper_tag' in params else 'Unknown'
verbose = bool(params['verbose']) if 'verbose' in params else True # print everything instead of minimal
filelog = bool(params['filelog']) if 'filelog' in params else True # print to file instead of terminal
# create experiment folder under /logs
dt = get_datetime_string()
exper_path = EXPERIMENT_PATH + exper_tag + '-'+ dt
if not os.path.isdir(exper_path):
os.makedirs(exper_path)
# if filelog: # all stdout print to file? >>>>>>>>>>>> ignore filelog for Colab WandB version
# sys.stdout = open(exper_path + '/' + exper_tag + '-print.txt', 'w')
if verbose:
nRuns = np.prod([len(v) for v in params.values() if isinstance(v, list) ])
print(f'>>> START EXPERIMENT: path="{exper_path}" with {nRuns} runs using...\nparams = {params}')
# generate list of all hparams and its combinations
hparam_list = []
h_keys = []
# find all params that are hyper-params, having a list of values, rather than single scalar
for key, value in params.items():
# print('key = ', key, 'value = ', value, type(value)) # DEBUG: remove
if isinstance(value, list):
h_keys.append(key)
# generate all combinations among hyper-params list
combinations = it.product(*(params[key] for key in h_keys))
for tup in combinations:
s = '{ '
for i, key in enumerate(h_keys):
if isinstance(tup[i], str):
s += f"'{key}': '{tup[i]}', "
else:
s += f"'{key}': {tup[i]}, "
s = s[:-2] + ' }' # remove comma and add closing bracket
# print('tup = ', tup, 's = ', s) # >>>> TODO debug remove
hparam_list.append(eval(s))
# execute process_run for each hparam_list combination
result_list = []
for i, hparam in enumerate(hparam_list):
if verbose:
print('=' * 80)
print(f'>>> START RUN{i:02d} with hparams = {hparam}')
print(f'>>> INFO: START RUN{i:02d} with hparam = {hparam}', file=sys.stderr)
run_log = hparam
params.update(hparam)
# print('params = ', params)
# create new run folder with hparam value appended
run_folder = exper_path + f'/runs/RUN{i:02d}_' + dt[-4:] + '_' + \
''.join([f'{str(key)[:3]}{str(value).replace(".", "")}_' for (key, value) in hparam.items()])[:-1]
if not os.path.isdir(run_folder):
os.makedirs(run_folder)
start_time = perf_counter()
############# execute PROCESS_RUN function in calling experiment
run_metrics = process_run(run_folder, params)
run_log.update(run_metrics)
# get elapse time of this run and save
elapse_time = perf_counter() - start_time
run_log.update({'run_time': elapse_time})
# save run_log ndarray plus scalars as exper_results rows
run_dict = _save_run_log(run_folder, run_log, verbose)
result_list.append(run_dict)
if verbose:
# print(f'>>> END RUN{i:02d} elapse={elapse_time:0.3f} sec with run_log={list(run_log.keys())}')
print(f'>>> END RUN{i:02d} elapse={elapse_time:0.3f} sec run_log={",".join([str(key) for key in run_log.keys()])}')
# create run-results df with rows of metrics from all runs
run_results = pd.DataFrame(result_list)
total_run_time = run_results['run_time'].sum()
run_results.to_json(exper_path + '/run_results.json')
print(f'>>> INFO: Saving run results with total run time = {total_run_time/60:0.1f} min', file=sys.stderr)
# create animated GIF for all PNG plots across all runs
if len(hparam_list) > 1: # only if there is more than one PNG to convert to video
create_all_MP4(exper_path, nImagesPerSec=1.0)
print(f'>>> INFO: Generated MP4 video from all run PNGs', file=sys.stderr)
if verbose:
print('Experiment Results... ', run_results)
print(f'>>> END EXPERIMENT: elapse={(total_run_time/60):3.1f} min')
sys.stdout.close()
return exper_path
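################### illustrative usage (not part of the original module)
# Minimal sketch of driving create_experiment. The run function and every parameter value
# below are made-up placeholders; EXPERIMENT_PATH must point to a writable folder to run it.
def _demo_run(run_folder, params):
    # pretend "training": derive a couple of scalar metrics from the swept value
    lr = params['lr']
    return {'loss': 1.0 / (1.0 + lr), 'final_lr': lr}
def _demo_experiment():
    demo_params = {
        'exper_tag': 'demo',   # experiment folder prefix
        'verbose': 1,
        'lr': [0.01, 0.1],     # list-valued entry -> swept as a hyper-parameter (2 runs)
        'batch_size': 32,      # scalar entry -> shared by every run
    }
    return create_experiment(_demo_run, demo_params)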
##########################################################################################
# Internal Subroutines
##########################################################################################
####################### create_all_MP4
def create_all_MP4(exper_path, nImagesPerSec=1.0):
'''
    find all PNG files in the first run folder (RUN00*); then call _create_MP4 for each
    exper_path str path to experiment folder
    nImagesPerSec float seconds each PNG image is displayed (0.5 shows a new image every 0.5 s)
'''
filenames = glob.glob(exper_path + '/runs/RUN00*/*.png')
if len(filenames) == 0:
print(f' WARNING: create_all_MP4 found no PNG plot files in first run folder')
return
for filename in filenames:
plot_name = filename[filename.rfind('\\') :] # find last slash before PNG filename
plot_name = plot_name[1 : -4] # trim slash in front and '.PNG' at end
# print('plot_name = ', plot_name)
_create_MP4(exper_path, plot_name, nImagesPerSec=nImagesPerSec)
####################### _create_MP4 ...used by create_all_MP4
def _create_MP4(exper_path, plot_name, nImagesPerSec):
'''
    create an MP4 video from the named PNG plot across all runs
    exper_path str path to experiment folder
    plot_name str name of plot PNG file (without '.png' ext)
    nImagesPerSec float seconds each PNG image is displayed
'''
MP4_file = exper_path + '/' + plot_name + '.mp4'
filenames = glob.glob(exper_path + '/runs/RUN*' + '/' + plot_name + '.png')
filenames = sorted(filenames)
images = [mpimg.imread(f) for f in filenames]
print(f' INFO: generating {plot_name} MP4 with {len(images)} PNG images')
if len(filenames) == 0:
print(f' WARNING: Create_MP4 found no PNG files for {plot_name} plot')
# set/calculate video parameters
nFramesPerSec = 30
nImages = len(images)
nFramesPerImage = int(nFramesPerSec * nImagesPerSec)
nFrames = nFramesPerImage * nImages
# First set up the figure, the axis, and the plot element we want to animate
fig = plt.figure(figsize=(9.6, 5.4), dpi=100)
plt.axis('off')
a = images[0]
# im = plt.imshow(a, interpolation=None, aspect='auto', vmin=0, vmax=1)
im = plt.imshow(a, interpolation=None, aspect='equal')
def animate(i):
ii = i // nFramesPerImage # repeat each image nFramesPerImage times
im.set_array(images[ii])
return [im]
# anim = animation.FuncAnimation(fig, animate, frames=len(images),
anim = animation.FuncAnimation(fig, animate, frames=nFrames, repeat=False)
# anim = animation.FuncAnimation(fig, animate, frames=nFrames, blit=True, repeat=False)
# interval=10, blit=True, repeat=False)
anim.save(MP4_file, fps=nFramesPerSec, extra_args=['-vcodec', 'libx264'])
####################### _create_all_aminGIF
def _create_all_aminGIF(exper_path, fps=0.5, repeat_first=5):
'''
    find all PNG files in the first run folder (RUN00*); then call _create_aminGIF for each
exper_path str path to experiment folder
fps float sets frames-per-second, so 0.5 is frame every 2 sec
repeat_first int repeats the first PNG as start of loop
'''
filenames = glob.glob(exper_path + '/runs/RUN00*/*.png')
print(f' INFO: generating {len(filenames)} anim-GIFs')
if len(filenames) == 0:
print(f' WARNING: create_aminGIF found no PNG plot files in first run folder')
return
for filename in filenames:
plot_name = filename[filename.rfind('\\') :] # find last slash before PNG filename
plot_name = plot_name[1 : -4] # trim slash in front and '.PNG' at end
# print('plot_name = ', plot_name)
_create_aminGIF(exper_path, plot_name, fps=fps, repeat_first=repeat_first)
####################### _create_aminGIF ...used by _create_all_aminGIF
def _create_aminGIF(exper_path, plot_name, fps=0.5, repeat_first=5):
'''
create aminated GIF from PNG plot in each run
exper_path str path to experiment folder
plot_name str name of plot PNG file (without '.png' ext)
fps float sets frames-per-second
repeat_first int repeats the first PNG as start of loop
'''
anim_file = exper_path + '/' + plot_name + '.gif'
with imageio.get_writer(anim_file, mode='I', fps=fps) as writer:
# collect filenames for plot_name across all runs
# filenames = glob.glob(exper_path + '/runs/RUN*' + '/' + plot_name + '.png')
filenames = glob.glob(exper_path + '/runs/RUN*' + '/' + plot_name + '.png')
filenames = sorted(filenames)
print(f' INFO: generating anim-GIF {plot_name} with {len(filenames)} PNGs')
if len(filenames) == 0:
print(f' WARNING: Create_AminGIF found no PNG file for {plot_name} plot')
# append each PNG together
for i, filename in enumerate(filenames):
if i == 0: # repeat the first PNG several times
for ii in range(repeat_first):
image = imageio.imread(filename)
writer.append_data(image)
else:
image = imageio.imread(filename)
writer.append_data(image)
# write anim GIF
image = imageio.imread(filename)
writer.append_data(image)
####################### _save_run_log
def _save_run_log(run_path, run_log, verbose):
'''
save all run_log items in run_path folder, depending on object type
run_path str path to current run folder
run_log dict keyvalue of result variables to be saved for this run
verbose int level of print verbosity
'''
print(f'>>> SAVE RUN LOG: run_path={run_path} with type={type(run_path)}')
# if (type(run_log) is dict): # TODO: fix by defining 'dict' type
# print(f'ERROR: run_log is not type DICT')
    run_dict = {}  # scalar run metrics returned to the caller
for key, value in run_log.items(): # TODO - save model, history as ndarray
value_str = ''
if isinstance(value, int) or isinstance(value, np.int32):
            run_dict.update({key: value})
value_str = str(value)
elif isinstance(value, float) or isinstance(value, np.float32):
            run_dict.update({key: value})
value_str = str(value)
elif isinstance(value, np.ndarray):
np.save(run_path + '/' + str(key), value)
elif isinstance(value, list):
with open(run_path + '/' + str(key + '.pkl'), 'wb') as f:
# https://stackoverflow.com/questions/12309269/how-do-i-write-json-data-to-a-file/37795053#37795053 TODO:
pickle.dump(value, f)
# value.to_pickle(run_path + '/' + str(key))
else:
print(f' SAVE ERROR: var {key} with value {value} not saved')
if verbose: print(f' {key}: {type(value)} = {value_str}')
    return run_dict
################### get_datetime_string
def get_datetime_string():
'''
Return datetime string like "YYYYMMDD-HHMMSS"
Note: add '%z' for '-ZZZZ' is GMT offset, like MST = -7000
'''
return strftime("%Y%m%d-%H%M%S", localtime())
######### get dict from arg
def get_dict_from_arg(arg_no):
'''
Returns dictionary pass in arg# as string
'''
# for arg in sys.argv:
# print('arg = ', arg)
data = sys.argv[arg_no]
# print('data = ', data, type(data))
data2 = str(data).replace("'", '"')
# print('data2 = ', data2, type(data))
dict = json.loads(data2)
# print('dict = ', dict)
# for key, value in dict.items():
# print(f'key={key} value={value}')
return dict
# return json.loads(str(sys.argv[arg]).replace("'", '"'))
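######### illustrative usage (not part of the original script)
# Shows how a dict would be passed as a single quoted command-line argument and parsed back;
# the argv contents below are made up for demonstration only.
def _example_dict_arg():
    sys.argv = ['experiment.py', "{'lr': 0.01, 'epochs': 5}"]
    return get_dict_from_arg(1)   # -> {'lr': 0.01, 'epochs': 5}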
#################################################################
# Save dataframe to experiment run_path
def save_dataframe(path, df_name, df_table): #>>>>> TODO use json instead ???
"""Save dataframe to path folder with name 'df_name' using pickle
Parameters
----------
path : str
file path to destination
    df_name : str
        file name used for the saved JSON (without extension)
    df_table : pandas.DataFrame
        dataframe to save
"""
# df_table.to_pickle(f'{path}/{df_name}.pkl')
df_table.to_json(f'{path}/{df_name}.json')
# Load dataframe from a previous experiment run_path
def load_dataframe(path, df_name):
# df = pd.read_pickle(path + '\\' + df_name + '.pkl')
df = | pd.read_json(path + '\\' + df_name + '.json') | pandas.read_json |
#!/usr/bin/env python3
import ccxt
from configparser import ConfigParser
import json
import os
import pickle
import redis
import socket
import tempfile
import time
import threading
import zlib
import numpy as np
import talib.abstract as ta
from pandas import DataFrame, Series
from requests_futures.sessions import FuturesSession
from sklearn.preprocessing import MinMaxScaler
import keras.models
BLACK_LIST = ['BNB']
CRON_TIME = 15
TA_TIME_FRAME = '15m'
ML_TIME_FRAME = '1h' # '1h', '4h', '1d'
STEP_SIZE = 5
BOT_NAME = 'Detector'
HOST_NAME = socket.gethostname()
CONFIG_FILE = '{}/config.ini'.format(os.path.dirname(os.path.abspath(__file__)))
config = ConfigParser()
config.read(CONFIG_FILE)
session = FuturesSession()
rd = redis.StrictRedis(host=config['REDIS']['HOST'],
port=config['REDIS']['PORT'],
password=config['REDIS']['PASS'], db=0)
exchange = ccxt.binance({'apiKey': config['BINANCE']['KEY'],
'secret': config['BINANCE']['SECRET']})
def make_keras_picklable():
def __getstate__(self):
with tempfile.NamedTemporaryFile(suffix='.hdf5', delete=True) as fd:
keras.models.save_model(self, fd.name, overwrite=True)
model_str = fd.read()
d = {'model_str': model_str}
return d
def __setstate__(self, state):
with tempfile.NamedTemporaryFile(suffix='.hdf5', delete=True) as fd:
fd.write(state['model_str'])
fd.flush()
model = keras.models.load_model(fd.name)
self.__dict__ = model.__dict__
cls = keras.models.Model
cls.__getstate__ = __getstate__
cls.__setstate__ = __setstate__
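# Illustrative usage (not part of the original bot): make_keras_picklable() must be called
# once, after which keras models survive a pickle round-trip (e.g. for caching in redis).
# 'model' is any keras Model; the zlib+pickle pattern below is only a plausible use of the
# zlib/pickle imports above and is an assumption, not the bot's actual flow.
def _example_pickle_model(model):
    make_keras_picklable()
    blob = zlib.compress(pickle.dumps(model))
    return pickle.loads(zlib.decompress(blob))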
class Symbol(object):
def __init__(self, obj):
self.__dict__ = json.loads(json.dumps(obj))
def log(text):
msg = '{} {} {} {}'.format(time.strftime("%d/%m/%Y %H:%M"), HOST_NAME, BOT_NAME, text)
url = 'https://api.telegram.org/bot{}/sendMessage?chat_id={}&text={}&parse_mode=markdown' \
.format(config['TELEGRAM']['BOT'], config['TELEGRAM']['CHAT'], msg)
session.get(url)
print(msg)
return
def crossed(series1, series2, direction=None):
if isinstance(series1, np.ndarray):
series1 = Series(series1)
if isinstance(series2, int) or isinstance(series2, float) or isinstance(series2, np.ndarray):
series2 = | Series(index=series1.index, data=series2) | pandas.Series |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/03_key_driver_analysis.ipynb (unless otherwise specified).
__all__ = ['KeyDriverAnalysis']
# Cell
import numpy as np
import pandas as pd
pd.set_option('display.max_columns', 500)
import time
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
import matplotlib.pyplot as plt
import shap
from .preprocessing import DataframePreprocessor
class KeyDriverAnalysis:
"""
Performs key driver analysis
"""
def __init__(self, df, outcome_col='outcome', text_col=None, include_cols=[], ignore_cols=[],
verbose=1):
"""
Instantiates the KeyDriverAnalysis instance.
"""
self.v = verbose
self.pp = None # set with call to _preprocess
self.df, self.x, self.y = self._preprocess(df, outcome_col=outcome_col, text_col=text_col,
include_cols=include_cols, ignore_cols=ignore_cols)
def _preprocess(self, df, outcome_col='outcome', text_col=None, include_cols=[], ignore_cols=[]):
"""
preprocesses DataFrame
"""
temp_treatment = 'CausalNLP_temp_treatment'
df = df.copy()
df[temp_treatment] = [0] * df.shape[0]
# preprocess
self.pp = DataframePreprocessor(treatment_col = temp_treatment,
outcome_col = outcome_col,
text_col=text_col,
include_cols=include_cols,
ignore_cols=ignore_cols,
verbose=self.v)
df, x, y, _ = self.pp.preprocess(df,
training=True,
min_df=0.05,
max_df=0.5,
ngram_range=(1,1),
stop_words='english')
return df, x, y
def correlations(self, outcome_only=True):
"""
        Computes correlations between the independent variables and the outcome
"""
df = self.x.copy()
df[self.pp.outcome_col] = self.y
corrALL = df.apply(pd.to_numeric, errors='coerce').corr()
if outcome_only:
df_results = corrALL[[self.pp.outcome_col]]
df_results = df_results.sort_values(by=self.pp.outcome_col, key=abs, ascending=False)
return df_results.iloc[1: , :]
#return df_results.sort_values(by=[self.pp.outcome_col])
else:
return corrALL
def importances(self, plot=True, split_pct=0.2,
use_shap=False, shap_background_size=50,
rf_model=None, n_estimators=100, n_jobs=-1, random_state=42):
"""
Identifies important predictors using a RandomForest model.
"""
X_train, X_test, y_train, y_test = train_test_split(self.x.values, self.y.values,
test_size=split_pct,
random_state=random_state)
rf_type = RandomForestClassifier if self.pp.is_classification else RandomForestRegressor
rf = rf_type(n_estimators = n_estimators,
n_jobs = n_jobs,
oob_score = True,
bootstrap = True,
random_state = random_state)
rf.fit(X_train, y_train)
if self.v:
print('R^2 Training Score: {:.2f} \nOOB Score: {:.2f} \nR^2 Validation Score: {:.2f}'.format(
rf.score(X_train, y_train),
rf.oob_score_,
rf.score(X_test, y_test)))
driverNames = self.x.columns.values
if use_shap:
explainer = shap.KernelExplainer(rf.predict, X_test[:shap_background_size,:])
shap_values = explainer.shap_values(X_test[:shap_background_size,:])
if plot:
shap.summary_plot(shap_values, X_test[:shap_background_size,:], feature_names=driverNames)
vals = np.abs(shap_values).mean(0)
df_results = pd.DataFrame(list(zip(driverNames, vals)),
columns=['Driver','Importance'])
df_results.sort_values(by=['Importance'],
ascending=False, inplace=True)
return df_results
else:
df_results = pd.DataFrame(data = {'Driver': driverNames,
'Importance': rf.feature_importances_})
df_results = df_results.sort_values('Importance', ascending=False)
if plot:
feat_importances = | pd.Series(rf.feature_importances_, index=driverNames) | pandas.Series |
# coding=utf-8
from hielen2.source import CloudSource, ActionSchema, GeoInfoSchema
from hielen2.utils import LocalFile, ColorMap, Style, FTPPath
from hielen2.ext.source_rawsource import Source as RawSource
import hielen2.api.features as featman
from hielen2.mapmanager import Multiraster
from hielen2.cloudmanager import PotreeCM
from .cloudpainter import makemultilaz
import json
from pathlib import Path
from marshmallow import fields
from numpy import full
from pandas import read_csv, DataFrame, Series, DatetimeIndex
from matplotlib.cm import jet
from matplotlib.colors import rgb2hex
from xarray import open_rasterio
from shutil import copy
import geojson
from datetime import datetime
import traceback
series_file_date_parser = lambda x: datetime.strptime(x, "%d/%m/%Y %H.%M")
#mapbasename="basemap.tif"
class ConfigSchema(GeoInfoSchema):
#class ConfigSchema(ActionSchema):
_self_hints = {
"TinSAR Base" : {
0: ["master_cloud","references to master cloud csv in FTP",True],
},
"TinSAR Color Maps": {
0: ["displ_cmap","Displacement colormap range",True],
1: ["ampli_cmap","Amplitude colormap range",True],
},
"TinSAR Selected Points":{
0: ["point_style","style code for the selected points",True],
1: ["series_file","textfile containing selected points and dataseries of theirs",True]
}
}
master_cloud = FTPPath(required=True, allow_none=False)
displ_cmap = ColorMap(required=False,allow_none=True,default=None)
ampli_cmap = ColorMap(required=False,allow_none=True,default=None)
point_style = Style(required=False, allow_none=True,default=None)
series_file = FTPPath(required=False, allow_none=True)
class FeedSchema(ActionSchema):
_self_hints = {
"TinSAR Feed": {
0: ["displacement_cloud","reference to result cloud in FTP",True],
1: ["amplitude_cloud","refernce to radar amplitutde cloud in FTP",True],
2: ["displacement_geotiff","reference to result geotiff in FTP",True],
3: ["amplitude_geotiff","refernce to radar amplitude geotiff in FTP",True]
}
}
displacement_cloud = FTPPath(required=False, allow_none=True)
amplitude_cloud = FTPPath(required=False, allow_none=True)
displacement_geotiff = FTPPath(required=False, allow_none=True)
amplitude_geotiff = FTPPath(required=False, allow_none=True)
def get_imgname(mapname,timestamp,param):
return f"{mapname}_{timestamp[:14]}_{param}.tif"
class Source(CloudSource):
'''
    TinSAR source manager
'''
def _config(self, brandnewconf=True, **kwargs):
if brandnewconf:
kwargs['opacity']=50
out=super().config(**kwargs)
chstruct={
"param": 'Displacement',
"struct": {
"cache": None,
"modules": {},
"mu":"mm",
"operands": {"output":"displacement"},
"operator": None
}
}
self.addParamSeries(**chstruct)
chstruct={
"param": 'Radar_Amplitude',
"struct": {
"cache": None,
"modules": {},
"mu":"mm",
"operands": {"output":"amplitude"},
"operator": None
}
}
self.addParamSeries(**chstruct)
else:
out=kwargs
timestamp=out['timestamp']
out['master_cloud']=kwargs['master_cloud']
confpath=self.hasher(timestamp)
mapmanager=Multiraster(self.uid,confpath)
mapmanager.mapcache.mkdir()
mapmanager.setMFparams(bands=3,crs='EPSG:4326')
self.filecache.mkdir(confpath)
        # Configurable settings: displ_cmap, ampli_cmap
def_cmap=[ [ a/100, rgb2hex(jet (a/100)[0:3]) ] for a in range(0,101,10) ]
if kwargs['displ_cmap'] is None:
kwargs['displ_cmap'] = ColorMap.make_colormap(def_cmap)
kwargs['displ_cmap']["norm"] = None
out['displ_cmap']=kwargs['displ_cmap']
if kwargs['ampli_cmap'] is None:
kwargs['ampli_cmap'] = ColorMap.make_colormap(def_cmap)
kwargs['ampli_cmap']["norm"] = None
out['ampli_cmap']=kwargs['ampli_cmap']
self.setParamOperands('Displacement',cmap=out["displ_cmap"])
self.setParamOperands('Radar_Amplitude',cmap=out["ampli_cmap"])
cloudman=PotreeCM(self.uid,confpath)
cloudman.cloudcache.mkdir()
clds=makemultilaz(out['master_cloud'],str(self.filecache / confpath ),basemanage='a')
for k,w in clds.items():
cloudman.makePotree(w,k)
#print(json.dumps(out,indent=4))
out['point_style']=kwargs['point_style']
try:
points_file=Path(kwargs["series_file"])
except Exception as e:
points_file = None
self._feed_subitems(points_file,out['point_style'])
if not brandnewconf:
            ## Recreate the clouds associated with this config
try:
nextconf=self.getActionValues('config',slice(timestamp,None))[1]['timestamp']
except Exception as e:
nextconf=None
feeds=self.getActionValues('feed',slice(timestamp,nextconf))
for f in feeds:
feedkwargs=f['value']
self.feed(**feedkwargs)
return out
def config(self,**kwargs):
return self._config(brandnewconf=True,**kwargs)
def updateConfig(self,**kwargs):
return self._config(brandnewconf=False,**kwargs)
def cleanConfig(self,timestamp):
"""
da analizzare
"""
timestamp=self.hasher(timestamp)
self.filecache.rmdir(timestamp)
PotreeCM(self.uid,timestamp).cloudcache.rmdir()
Multiraster(self.uid,timestamp).mapcache.rmdir()
def _feed_subitems(self, points_file=None,point_style=None):
try:
subitems= set(self.getFeatureInfo('subitems'))
except Exception as e:
subitems= set([])
"""
        Associate the selected points with the main feature and create their data series.
"""
if points_file is not None:
series=read_csv(points_file,sep=";",index_col=0,skiprows=3,parse_dates=[0],date_parser=series_file_date_parser)
points= | read_csv(points_file,sep=";",index_col=0,header=None) | pandas.read_csv |
import numpy as np
from pandas import Categorical, Series
import pandas._testing as tm
class TestUnique:
def test_unique_data_ownership(self):
# it works! GH#1807
Series(Series(["a", "c", "b"]).unique()).sort_values()
def test_unique(self):
# GH#714 also, dtype=float
ser = Series([1.2345] * 100)
ser[::2] = np.nan
result = ser.unique()
assert len(result) == 2
# explicit f4 dtype
ser = Series([1.2345] * 100, dtype="f4")
ser[::2] = np.nan
result = ser.unique()
assert len(result) == 2
def test_unique_nan_object_dtype(self):
# NAs in object arrays GH#714
ser = Series(["foo"] * 100, dtype="O")
ser[::2] = np.nan
result = ser.unique()
assert len(result) == 2
def test_unique_none(self):
# decision about None
ser = Series([1, 2, 3, None, None, None], dtype=object)
result = ser.unique()
expected = np.array([1, 2, 3, None], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_unique_categorical(self):
# GH#18051
cat = Categorical([])
ser = Series(cat)
result = ser.unique()
tm.assert_categorical_equal(result, cat)
cat = Categorical([np.nan])
ser = Series(cat)
result = ser.unique()
| tm.assert_categorical_equal(result, cat) | pandas._testing.assert_categorical_equal |
import numpy as np
import pandas as pd
MONTHS_IN_QUARTER = 3
MONTHS_IN_YEAR = 12
CONVERSION_YIELD = 0.06
CONVERSION_FV = 1.03
GLOBEX_CODES = ("ZN", "ZB", "UB", "ZT", "TN", "Z3N", "ZF")
def _n_and_v(globex_code, year_fraction):
mask = np.in1d(globex_code, ("ZN", "ZB", "UB", "TN"))
n = mask * np.nan
v = mask * np.nan
if mask.any():
values = (
np.floor(year_fraction / MONTHS_IN_QUARTER * MONTHS_IN_YEAR)
* MONTHS_IN_QUARTER
)
n[mask] = values if isinstance(values, float) else values[mask]
values = n * (n < 7) + MONTHS_IN_QUARTER * (n >= 7)
v[mask] = values if isinstance(values, float) else values[mask]
mask = np.in1d(globex_code, ("ZT", "Z3N", "ZF"))
if mask.any():
values = np.floor(year_fraction * MONTHS_IN_YEAR)
n[mask] = values if isinstance(values, float) else values[mask]
values = n * (n < 7) + (n - 6) * (n >= 7)
v[mask] = values if isinstance(values, float) else values[mask]
if np.isnan(n).all():
raise NotImplementedError(f"No {globex_code} deliverables found!")
return n, v
def conversion_factor(globex_code, coupon, time_to_maturity):
years = np.floor(time_to_maturity)
year_fraction = time_to_maturity - years
n, v = _n_and_v(globex_code, year_fraction)
a = 1 / pow(CONVERSION_FV, v / 6)
b = (coupon / 2) * (6 - v) / 6
c = 1 / pow(CONVERSION_FV, 2 * years + 1 * (n >= 7))
d = (coupon / CONVERSION_YIELD) * (1 - c)
factor = a * (coupon / 2 + c + d) - b
return factor if factor.size > 1 else float(factor)
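# Illustrative check (not part of the original module): rough conversion factor for a
# hypothetical ZN deliverable with a 2.5% coupon and about 9.25 years to maturity. The
# inputs are invented for demonstration, not exchange data; the factor comes out below 1
# because the coupon is under the 6% conversion yield.
def _example_conversion_factor():
    return conversion_factor("ZN", 0.025, 9.25)   # ~0.75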
def extract_deliverables(df):
deliverables = []
for code in GLOBEX_CODES:
tmp_df = df[find_deliverables_of(code, df.MATURITY)].copy()
tmp_df["DELIVERABLE"] = code
tmp_df["CONV_FACTOR"] = conversion_factor(
tmp_df.DELIVERABLE, tmp_df.COUPON / 100, tmp_df.MATURITY
)
deliverables.append(tmp_df)
deliverables = | pd.concat(deliverables) | pandas.concat |
import os
from matplotlib import use
use('Agg')
from matplotlib import pyplot as plt
from matplotlib.cm import get_cmap
plt.switch_backend('agg')
import cartopy.crs as ccrs
import numpy as np
from blackswan.utils import get_time
from blackswan import templating
from blackswan.utils import prepare_static_folder
import logging
LOGGER = logging.getLogger("PYWPS")
def get_time_nc(nc_file, tv='time'):
"""
returns all timestamps of given netcdf file as datetime list.
:param nc_file: NetCDF file(s)
:param tv: name of temporal dimension
:return format: netcdftime._datetime.datetime
"""
from netCDF4 import MFDataset, num2date
ds = MFDataset(nc_file)
try:
time = ds.variables[tv]
except:
tv = 'time_counter'
ds.close()
try:
ds = MFDataset(nc_file)
time = ds.variables[tv]
if (hasattr(time, 'units') and hasattr(time, 'calendar')) == True:
timestamps = num2date(time[:], time.units, time.calendar)
elif hasattr(time, 'units'):
timestamps = num2date(time[:], time.units)
else:
timestamps = num2date(time[:])
ds.close()
except Exception as e:
raise Exception
return timestamps
def pdf_from_analog(lon, lat, data, vmin, vmax, Nlin=30, domain=[-80,50,20,70], output='ana_map.pdf', title='Analogs'):
fig = plt.figure()
fig.set_size_inches(18.5, 10.5, forward=True)
ax = plt.axes(projection=ccrs.PlateCarree())
ax.set_extent(domain, crs=ccrs.PlateCarree())
ax.coastlines(linewidth=0.8)
ax.gridlines()
levels = np.linspace(vmin, vmax, Nlin)
cmap = get_cmap("RdBu_r")
data_map = ax.contourf(lon, lat, data, levels=levels, extend='both', cmap=cmap, projection=ccrs.PlateCarree())
data_cbar = plt.colorbar(data_map, extend='both', shrink=0.6)
data_cont = ax.contour(lon, lat, data, levels=levels, linewidths=0.8, colors="white", linestyles='dashed', projection=ccrs.PlateCarree())
plt.clabel(data_cont, inline=1, fmt='%1.0f')
plt.title(title)
plt.tight_layout()
pdffilename = output
plt.savefig(pdffilename)
fig.clf()
plt.close(fig)
return pdffilename
def pdf_from_ld(x, y, n_set=50, output='ld_dist.pdf'):
fig = plt.figure()
fig.set_size_inches(18.5, 10.5, forward=True)
xedges, yedges = np.linspace(0, 25, n_set), np.linspace(0, 1, n_set)
hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges))
xidx = np.clip(np.digitize(x, xedges), 0, hist.shape[0]-1)
yidx = np.clip(np.digitize(y, yedges), 0, hist.shape[1]-1)
c = hist[xidx, yidx]
plt.scatter(x, y, c=c, cmap='jet')
pdffilename = output
plt.savefig(pdffilename)
fig.clf()
plt.close(fig)
return pdffilename
def get_configfile(files,
seasoncyc_base=None,
seasoncyc_sim=None,
base_id='NCEP',
sim_id='NCEP',
timewin=1,
varname='slp',
seacyc=False,
cycsmooth=91,
nanalog=20,
seasonwin=30,
distfun='rms',
outformat='.txt',
period=["1973-01-01", "2012-12-31"],
bbox="-180.0,-90.0,180,90.0",
calccor=True,
silent=False,
config_file = "config.txt"):
"""
Generates the configuration file for the CASTf90 calculation.
    TODO: use jinja template
    :param files: input files: [archive file (reference period), simulation file (period to analyse), output file]
:param timewin: number of days the distance is averaged
:param varname: variable name in input files
:param seacyc: remove the smoothed seasonal cycle from the input fields (True/False)
:param cycsmooth: smoothing window for the seasonal cycle in days (should be an odd integer)
:param nanalog: Number of analogs to detect
:param distfun: Name of the distance function used to calculate the analogs.
(Supported values: 'rms' 'mahalanobis', 'S1' (Teweles and wobus), 'cosine' (correlation)
and - still experimental - 'of' (displacement and amplitude score based on optical flow image distortion)
:param outformat: file format for output ('txt' or 'nc' (default))
:param period: reference period in which analogs are picked (for netcdf output attributes)
:param bbox: coordinates for the region to be analysed
:param calccor: calculate rank correlation for analog fields (True/False)
:param silent: handling of log file output
:returns: configuration file
"""
from datetime import datetime as dt
date_stamp = dt.strftime(dt.now(), format='%Y%m%d_%H%M%S')
LOGGER.info('start configuration file preparation at: %s' % (date_stamp))
# convert True/False to Fortran syntax
seacyc = str(seacyc)
calccor = str(calccor)
silent = str(silent)
# write stuff to configuration file
# NB: if order or format or number changes, need to edit wps_analogs_viewer.py
# and template_analogviewer.html where these scripts read in the config
# params
# config_file = "config.txt"
config = open(config_file, "w")
config.write(
'!Configuration file for CASTf90 analogs processes deployed in blackswan\n')
config.write('!Created : %s \n' % (date_stamp))
config.write('!Version : 0.1.5 \n')
config.write('&FILES \n')
config.write(' my_files%archivefile = "{file}" \n'.format(
file=os.path.relpath(files[0])))
config.write(' my_files%simulationfile = "{file}" \n'.format(
file=os.path.relpath(files[1])))
config.write(' my_files%outputfile = "{file}" \n'.format(
file=os.path.relpath(files[2])))
    if seacyc != 'False':
config.write(' my_files%seacycfilebase = "{file}" \n'.format(
file=os.path.relpath(seasoncyc_base)))
config.write(' my_files%seacycfilesim = "{file}" \n'.format(
file=os.path.relpath(seasoncyc_sim)))
config.write('/ \n')
config.write('&PARAM \n')
config.write(' my_params%timewin = {timewin} \n'.format(timewin=timewin))
config.write(' my_params%varname = "{varname}" \n'.format(varname=varname))
config.write(' my_params%seacyc = .{seacyc}. \n'.format(
seacyc=seacyc.upper()))
config.write(' my_params%cycsmooth = {cycsmooth} \n'.format(
cycsmooth=cycsmooth))
config.write(' my_params%nanalog = {nanalog} \n'.format(nanalog=nanalog))
config.write(' my_params%seasonwin = {seasonwin} \n'.format(
seasonwin=seasonwin))
config.write(' my_params%distfun = "{distfun}" \n'.format(distfun=distfun))
config.write(' my_params%calccor = .{calccor}. \n'.format(
calccor=calccor.upper()))
config.write(' my_params%oformat = "{outformat}" \n'.format(
outformat=outformat)) # ".txt" # ! if equals ".nc"
config.write(' my_params%silent = .{silent}.\n'.format(
silent=silent.upper()))
config.write('/\n')
config.write('&ATTS\n')
config.write(' my_atts%simsource = "{sim_id}" \n'.format(sim_id=sim_id)) # model name
config.write(
' my_atts%predictorvar = "{varname}" \n'.format(varname=varname))
config.write(' my_atts%archisource = "{base_id}" \n'.format(base_id=base_id))
config.write(' my_atts%archiperiod = "{start},{end}" \n'.format(
start=period[0], end=period[1]))
config.write(' my_atts%predictordom = "{bbox}" \n'.format(bbox=bbox))
config.write('/\n')
config.close()
return config_file
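# Illustrative call (not part of the original module): minimal sketch of wiring
# get_configfile. Every file name, date and bbox below is a placeholder, not a
# path or dataset shipped with this package.
def _example_configfile():
    return get_configfile(
        files=['archive_slp.nc', 'simulation_slp.nc', 'analogs_out.txt'],
        varname='slp',
        nanalog=20,
        seasonwin=30,
        distfun='rms',
        period=['1948-01-01', '2017-12-31'],
        bbox='-80.0,22.5,50.0,70.0',
        config_file='config_example.txt')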
# def subset(resource=[], bbox='-80,50,22.5,70'):
# """
# OBSOLETE
# Returns a subset.
# :param resource: netCDF input files of one dataset
# :param bbox: bounding box
# :return: subset netCDF file
# """
# from tempfile import mkstemp
# from cdo import Cdo
# cdo = Cdo()
# resource.sort()
# ip, nc_concat = mkstemp(dir='.',suffix='.nc')
# nc_concat = cdo.cat(input=resource, output=nc_concat)
# ip, nc_subset = mkstemp(dir='.',suffix='.nc')
# nc_subset = cdo.sellonlatbox('%s' % bbox, input=nc_concat, output=nc_subset)
# LOGGER.info('subset done: %s ' % nc_subset)
# return nc_subset
def seacyc(archive, simulation, basecyc='seasoncyc_base.nc', simcyc='seasoncyc_sim.nc', method='base'):
"""
Subtracts the seasonal cycle.
:param archive: netCDF file containing the reference period
:param simulation: netCDF file containing the period to be analysed
:param method: method to generate the seasonal cycle files
base = seasonal cycle generated from reference period
sim = seasonal cycle generated from period to be analysed
own = seasonal cycle generated for both time windows
:return [str,str]: two netCDF filenames for analysis and reference period (located in working directory)
"""
try:
LOGGER.debug('seacyc started with method: %s' % method)
from shutil import copy
from blackswan.ocgis_module import call
# from blackswan.utils import get_variable
from cdo import Cdo
cdo = Cdo(env=os.environ)
if method == 'base':
seasoncyc_base = cdo.ydaymean(
input=archive, output=basecyc)
seasoncyc_sim = simcyc
copy(seasoncyc_base, seasoncyc_sim)
LOGGER.debug('seasoncyc_base calculated : %s' % seasoncyc_base)
elif method == 'sim':
seasoncyc_sim = cdo.ydaymean(
input=simulation, output=simcyc)
seasoncyc_base = basecyc
copy(seasoncyc_sim, seasoncyc_base)
elif method == 'own':
seasoncyc_base = cdo.ydaymean(
input=archive, output=basecyc)
seasoncyc_sim = cdo.ydaymean(
input=simulation, output=simcyc)
nt = cdo.ntime(input=seasoncyc_sim)
nt = int(nt[0]) # check number of timesteps
if (nt<365):
                LOGGER.debug('Simulation is too short ( %s ) to calculate seasonal cycle, using reference instead', str(nt))
seasoncyc_sim = simcyc
copy(seasoncyc_base, seasoncyc_sim)
else:
raise Exception('normalisation method not found')
except Exception:
msg = 'seacyc function failed:'
LOGGER.exception(msg)
raise Exception(msg)
return seasoncyc_base, seasoncyc_sim
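# Illustrative call (not part of the original module): typical use with the default
# 'base' method, where the seasonal cycle is computed from the archive file and reused
# for the simulation. File names are placeholders and cdo must be installed, as for the
# rest of this module.
def _example_seacyc():
    base_cyc, sim_cyc = seacyc('archive_slp.nc', 'simulation_slp.nc', method='base')
    return base_cyc, sim_cyc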
# obsolete, remove
# def config_edits(configfile):
# """
# Edits the CASTf90 configuration file. Removes filepaths.
# :param configfile: configfile name with its path
# :return str: modified_configfile name
# """
# try:
# # Read in the file
# filedata = None
# with open(configfile, 'r') as file:
# filedata = file.read()
# # Replace the target string
# filedata = filedata.replace(
# '/home/scratch01/sradanov/A2C2/NCEP/', '').replace('/home/estimr2/sradanov/Operational/', '')
# # Write the file out again
# with open(configfile, 'w') as file:
# file.write(filedata)
# LOGGER.info('configfile modified')
# except Exception:
# LOGGER.exeption('Failed to modify configfile:')
# return configfile
def reformat_analogs(analogs, prefix='modified-analogfile.tsv'):
"""
Reformats analogs results file for analogues viewer code.
:param analogs: output from analog_detection process
:return str: reformatted analogs file for analogues viewer
"""
# import numpy as np
import pandas as pd
try:
num_cols = 3 # dateAnlg, Dis, Corr
# Create dataframe and read in output csv file of analogs process
dfS = | pd.DataFrame() | pandas.DataFrame |
import re
import pandas as pd
# Dataframe cleaning
from qutil.format.number import fmtl, fmtn, fmtpx
def clean_column_names(df, inplace=True):
clean_cols = df.columns.str.lower().str.replace(' ', '_')
clean_cols = [re.sub(r'\W+', '', x) for x in clean_cols]
clean_cols = [re.sub('__', '_', x) for x in clean_cols]
df.columns = clean_cols
return None
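# Illustrative example (not part of the original module): a made-up frame showing the
# in-place renaming done by clean_column_names.
def _example_clean_columns():
    df = pd.DataFrame(columns=['First Name', 'Total ($)', 'Notes / Comments'])
    clean_column_names(df)
    return list(df.columns)   # ['first_name', 'total_', 'notes_comments']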
def proper_column_names(df, inplace=False, for_display=True):
"""
    Clean column names into title-cased, punctuation-free form
    :param df: DataFrame
    :param inplace: modify df in place instead of working on a copy
    :param for_display: replace underscores with spaces in the cleaned names
    :return: cleaned DataFrame when inplace is False, otherwise None
"""
if not inplace:
df = df.copy()
clean_cols = df.columns.str.replace(' ', '_')
clean_cols = [re.sub(r'\W+', '', x) for x in clean_cols]
clean_cols = [re.sub('__', '_', x) for x in clean_cols]
    clean_cols = [x if x.isupper() else x.title() for x in clean_cols]  # keep all-caps names as-is
if for_display:
clean_cols = [x.replace('_', ' ') for x in clean_cols]
df.columns = clean_cols
if not inplace:
return df
def cols_to_datetime(df, keys):
if isinstance(keys, list):
for key in keys:
df[key] = | pd.to_datetime(df[key]) | pandas.to_datetime |
import datetime
from collections import OrderedDict
import numpy as np
import pandas as pd
import pytest
from numpy.testing import assert_almost_equal, assert_allclose
from pandas.util.testing import assert_frame_equal, assert_series_equal
from pvlib.location import Location
from pvlib import clearsky
from pvlib import solarposition
from pvlib import irradiance
from pvlib import atmosphere
from conftest import (requires_ephem, requires_numba, needs_numpy_1_10,
pandas_0_22)
# setup times and location to be tested.
tus = Location(32.2, -111, 'US/Arizona', 700)
# must include night values
times = pd.date_range(start='20140624', freq='6H', periods=4, tz=tus.tz)
ephem_data = solarposition.get_solarposition(
times, tus.latitude, tus.longitude, method='nrel_numpy')
irrad_data = tus.get_clearsky(times, model='ineichen', linke_turbidity=3)
dni_et = irradiance.extraradiation(times.dayofyear)
ghi = irrad_data['ghi']
# setup for et rad test. put it here for readability
timestamp = pd.Timestamp('20161026')
dt_index = pd.DatetimeIndex([timestamp])
doy = timestamp.dayofyear
dt_date = timestamp.date()
dt_datetime = datetime.datetime.combine(dt_date, datetime.time(0))
dt_np64 = np.datetime64(dt_datetime)
value = 1383.636203
@pytest.mark.parametrize('input, expected', [
(doy, value),
(np.float64(doy), value),
(dt_date, value),
(dt_datetime, value),
(dt_np64, value),
(np.array([doy]), np.array([value])),
(pd.Series([doy]), np.array([value])),
(dt_index, pd.Series([value], index=dt_index)),
(timestamp, value)
])
@pytest.mark.parametrize('method', [
'asce', 'spencer', 'nrel', requires_ephem('pyephem')])
def test_extraradiation(input, expected, method):
out = irradiance.extraradiation(input)
assert_allclose(out, expected, atol=1)
@requires_numba
def test_extraradiation_nrel_numba():
result = irradiance.extraradiation(times, method='nrel', how='numba', numthreads=8)
assert_allclose(result, [1322.332316, 1322.296282, 1322.261205, 1322.227091])
def test_extraradiation_epoch_year():
out = irradiance.extraradiation(doy, method='nrel', epoch_year=2012)
assert_allclose(out, 1382.4926804890767, atol=0.1)
def test_extraradiation_invalid():
with pytest.raises(ValueError):
irradiance.extraradiation(300, method='invalid')
def test_grounddiffuse_simple_float():
result = irradiance.grounddiffuse(40, 900)
assert_allclose(result, 26.32000014911496)
def test_grounddiffuse_simple_series():
ground_irrad = irradiance.grounddiffuse(40, ghi)
assert ground_irrad.name == 'diffuse_ground'
def test_grounddiffuse_albedo_0():
ground_irrad = irradiance.grounddiffuse(40, ghi, albedo=0)
assert 0 == ground_irrad.all()
def test_grounddiffuse_albedo_invalid_surface():
with pytest.raises(KeyError):
irradiance.grounddiffuse(40, ghi, surface_type='invalid')
def test_grounddiffuse_albedo_surface():
result = irradiance.grounddiffuse(40, ghi, surface_type='sand')
assert_allclose(result, [0, 3.731058, 48.778813, 12.035025], atol=1e-4)
def test_isotropic_float():
result = irradiance.isotropic(40, 100)
assert_allclose(result, 88.30222215594891)
def test_isotropic_series():
result = irradiance.isotropic(40, irrad_data['dhi'])
assert_allclose(result, [0, 35.728402, 104.601328, 54.777191], atol=1e-4)
def test_klucher_series_float():
result = irradiance.klucher(40, 180, 100, 900, 20, 180)
assert_allclose(result, 88.3022221559)
def test_klucher_series():
result = irradiance.klucher(40, 180, irrad_data['dhi'], irrad_data['ghi'],
ephem_data['apparent_zenith'],
ephem_data['azimuth'])
assert_allclose(result, [0, 37.446276, 109.209347, 56.965916], atol=1e-4)
def test_haydavies():
result = irradiance.haydavies(40, 180, irrad_data['dhi'], irrad_data['dni'],
dni_et,
ephem_data['apparent_zenith'],
ephem_data['azimuth'])
assert_allclose(result, [0, 14.967008, 102.994862, 33.190865], atol=1e-4)
def test_reindl():
result = irradiance.reindl(40, 180, irrad_data['dhi'], irrad_data['dni'],
irrad_data['ghi'], dni_et,
ephem_data['apparent_zenith'],
ephem_data['azimuth'])
assert_allclose(result, [np.nan, 15.730664, 104.131724, 34.166258], atol=1e-4)
def test_king():
result = irradiance.king(40, irrad_data['dhi'], irrad_data['ghi'],
ephem_data['apparent_zenith'])
assert_allclose(result, [0, 44.629352, 115.182626, 79.719855], atol=1e-4)
def test_perez():
am = atmosphere.relativeairmass(ephem_data['apparent_zenith'])
dni = irrad_data['dni'].copy()
dni.iloc[2] = np.nan
out = irradiance.perez(40, 180, irrad_data['dhi'], dni,
dni_et, ephem_data['apparent_zenith'],
ephem_data['azimuth'], am)
expected = pd.Series(np.array(
[ 0. , 31.46046871, np.nan, 45.45539877]),
index=times)
assert_series_equal(out, expected, check_less_precise=2)
def test_perez_components():
am = atmosphere.relativeairmass(ephem_data['apparent_zenith'])
dni = irrad_data['dni'].copy()
dni.iloc[2] = np.nan
out, df_components = irradiance.perez(40, 180, irrad_data['dhi'], dni,
dni_et, ephem_data['apparent_zenith'],
ephem_data['azimuth'], am, return_components=True)
expected = pd.Series(np.array(
[ 0. , 31.46046871, np.nan, 45.45539877]),
index=times)
expected_components = pd.DataFrame(
np.array([[ 0. , 26.84138589, np.nan, 31.72696071],
[ 0. , 0. , np.nan, 4.47966439],
[ 0. , 4.62212181, np.nan, 9.25316454]]).T,
columns=['isotropic', 'circumsolar', 'horizon'],
index=times
)
if pandas_0_22():
expected_for_sum = expected.copy()
expected_for_sum.iloc[2] = 0
else:
expected_for_sum = expected
sum_components = df_components.sum(axis=1)
assert_series_equal(out, expected, check_less_precise=2)
assert_frame_equal(df_components, expected_components)
assert_series_equal(sum_components, expected_for_sum, check_less_precise=2)
@needs_numpy_1_10
def test_perez_arrays():
am = atmosphere.relativeairmass(ephem_data['apparent_zenith'])
dni = irrad_data['dni'].copy()
dni.iloc[2] = np.nan
out = irradiance.perez(40, 180, irrad_data['dhi'].values, dni.values,
dni_et, ephem_data['apparent_zenith'].values,
ephem_data['azimuth'].values, am.values)
expected = np.array(
[ 0. , 31.46046871, np.nan, 45.45539877])
assert_allclose(out, expected, atol=1e-2)
def test_liujordan():
expected = pd.DataFrame(np.
array([[863.859736967, 653.123094076, 220.65905025]]),
columns=['ghi', 'dni', 'dhi'],
index=[0])
out = irradiance.liujordan(
pd.Series([10]), pd.Series([0.5]), pd.Series([1.1]), dni_extra=1400)
assert_frame_equal(out, expected)
# klutcher (misspelling) will be removed in 0.3
def test_total_irrad():
models = ['isotropic', 'klutcher', 'klucher',
'haydavies', 'reindl', 'king', 'perez']
AM = atmosphere.relativeairmass(ephem_data['apparent_zenith'])
for model in models:
total = irradiance.total_irrad(
32, 180,
ephem_data['apparent_zenith'], ephem_data['azimuth'],
dni=irrad_data['dni'], ghi=irrad_data['ghi'],
dhi=irrad_data['dhi'],
dni_extra=dni_et, airmass=AM,
model=model,
surface_type='urban')
assert total.columns.tolist() == ['poa_global', 'poa_direct',
'poa_diffuse', 'poa_sky_diffuse',
'poa_ground_diffuse']
@pytest.mark.parametrize('model', ['isotropic', 'klucher',
'haydavies', 'reindl', 'king', 'perez'])
def test_total_irrad_scalars(model):
total = irradiance.total_irrad(
32, 180,
10, 180,
dni=1000, ghi=1100,
dhi=100,
dni_extra=1400, airmass=1,
model=model,
surface_type='urban')
assert list(total.keys()) == ['poa_global', 'poa_direct',
'poa_diffuse', 'poa_sky_diffuse',
'poa_ground_diffuse']
# test that none of the values are nan
assert np.isnan(np.array(list(total.values()))).sum() == 0
def test_globalinplane():
aoi = irradiance.aoi(40, 180, ephem_data['apparent_zenith'],
ephem_data['azimuth'])
airmass = atmosphere.relativeairmass(ephem_data['apparent_zenith'])
gr_sand = irradiance.grounddiffuse(40, ghi, surface_type='sand')
diff_perez = irradiance.perez(
40, 180, irrad_data['dhi'], irrad_data['dni'], dni_et,
ephem_data['apparent_zenith'], ephem_data['azimuth'], airmass)
irradiance.globalinplane(
aoi=aoi, dni=irrad_data['dni'], poa_sky_diffuse=diff_perez,
poa_ground_diffuse=gr_sand)
def test_disc_keys():
clearsky_data = tus.get_clearsky(times, model='ineichen',
linke_turbidity=3)
disc_data = irradiance.disc(clearsky_data['ghi'], ephem_data['zenith'],
ephem_data.index)
assert 'dni' in disc_data.columns
assert 'kt' in disc_data.columns
assert 'airmass' in disc_data.columns
def test_disc_value():
times = pd.DatetimeIndex(['2014-06-24T12-0700','2014-06-24T18-0700'])
ghi = pd.Series([1038.62, 254.53], index=times)
zenith = pd.Series([10.567, 72.469], index=times)
pressure = 93193.
disc_data = irradiance.disc(ghi, zenith, times, pressure=pressure)
assert_almost_equal(disc_data['dni'].values,
np.array([830.46, 676.09]), 1)
def test_dirint():
clearsky_data = tus.get_clearsky(times, model='ineichen',
linke_turbidity=3)
pressure = 93193.
dirint_data = irradiance.dirint(clearsky_data['ghi'], ephem_data['zenith'],
ephem_data.index, pressure=pressure)
def test_dirint_value():
times = pd.DatetimeIndex(['2014-06-24T12-0700','2014-06-24T18-0700'])
ghi = pd.Series([1038.62, 254.53], index=times)
zenith = pd.Series([10.567, 72.469], index=times)
pressure = 93193.
dirint_data = irradiance.dirint(ghi, zenith, times, pressure=pressure)
assert_almost_equal(dirint_data.values,
np.array([ 888. , 683.7]), 1)
def test_dirint_nans():
times = pd.DatetimeIndex(start='2014-06-24T12-0700', periods=5, freq='6H')
ghi = pd.Series([np.nan, 1038.62, 1038.62, 1038.62, 1038.62], index=times)
zenith = pd.Series([10.567, np.nan, 10.567, 10.567, 10.567,], index=times)
pressure = pd.Series([93193., 93193., np.nan, 93193., 93193.], index=times)
temp_dew = pd.Series([10, 10, 10, np.nan, 10], index=times)
dirint_data = irradiance.dirint(ghi, zenith, times, pressure=pressure,
temp_dew=temp_dew)
assert_almost_equal(dirint_data.values,
np.array([np.nan, np.nan, np.nan, np.nan, 893.1]), 1)
def test_dirint_tdew():
times = pd.DatetimeIndex(['2014-06-24T12-0700','2014-06-24T18-0700'])
ghi = pd.Series([1038.62, 254.53], index=times)
zenith = pd.Series([10.567, 72.469], index=times)
pressure = 93193.
dirint_data = irradiance.dirint(ghi, zenith, times, pressure=pressure,
temp_dew=10)
assert_almost_equal(dirint_data.values,
np.array([892.9, 636.5]), 1)
def test_dirint_no_delta_kt():
times = pd.DatetimeIndex(['2014-06-24T12-0700','2014-06-24T18-0700'])
ghi = pd.Series([1038.62, 254.53], index=times)
zenith = pd.Series([10.567, 72.469], index=times)
pressure = 93193.
dirint_data = irradiance.dirint(ghi, zenith, times, pressure=pressure,
use_delta_kt_prime=False)
assert_almost_equal(dirint_data.values,
np.array([861.9, 670.4]), 1)
def test_dirint_coeffs():
coeffs = irradiance._get_dirint_coeffs()
assert coeffs[0,0,0,0] == 0.385230
assert coeffs[0,1,2,1] == 0.229970
assert coeffs[3,2,6,3] == 1.032260
def test_erbs():
ghi = pd.Series([0, 50, 1000, 1000])
zenith = pd.Series([120, 85, 10, 10])
doy = pd.Series([1, 1, 1, 180])
    expected = pd.DataFrame(np.array(
        [[-0.00000000e+00, 0.00000000e+00, -0.00000000e+00],
         [9.67127061e+01, 4.15709323e+01, 4.05715990e-01],
         [7.94187742e+02, 2.17877755e+02, 7.18119416e-01],
         [8.42358014e+02, 1.70439297e+02, 7.68919470e-01]]),
        columns=['dni', 'dhi', 'kt'])
out = irradiance.erbs(ghi, zenith, doy)
assert_frame_equal(np.round(out, 0), np.round(expected, 0))
def test_erbs_all_scalar():
ghi = 1000
zenith = 10
doy = 180
expected = OrderedDict()
expected['dni'] = 8.42358014e+02
expected['dhi'] = 1.70439297e+02
expected['kt'] = 7.68919470e-01
out = irradiance.erbs(ghi, zenith, doy)
for k, v in out.items():
assert_allclose(v, expected[k], 5)
@needs_numpy_1_10
def test_dirindex():
clearsky_data = tus.get_clearsky(times, model='ineichen',
linke_turbidity=3)
ghi = pd.Series([0, 0, 1038.62, 254.53], index=times)
ghi_clearsky = pd.Series(
np.array([0., 79.73860422, 1042.48031487, 257.20751138]),
index=times
)
dni_clearsky = pd.Series(
np.array([0., 316.1949056, 939.95469881, 646.22886049]),
index=times
)
zenith = pd.Series(
np.array([124.0390863, 82.85457044, 10.56413562, 72.41687122]),
index=times
)
pressure = 93193.
tdew = 10.
out = irradiance.dirindex(ghi, ghi_clearsky, dni_clearsky,
zenith, times, pressure=pressure,
temp_dew=tdew)
dirint_close_values = irradiance.dirint(ghi, zenith, times,
pressure=pressure,
use_delta_kt_prime=True,
temp_dew=tdew).values
expected_out = np.array([np.nan, 0., 748.31562753, 630.72592644])
tolerance = 1e-8
assert np.allclose(out, expected_out, rtol=tolerance, atol=0,
equal_nan=True)
tol_dirint = 0.2
assert np.allclose(out.values, dirint_close_values, rtol=tol_dirint, atol=0,
equal_nan=True)
def test_dni():
ghi = pd.Series([90, 100, 100, 100, 100])
dhi = pd.Series([100, 90, 50, 50, 50])
zenith = | pd.Series([80, 100, 85, 70, 85]) | pandas.Series |
import os
import pandas as pd
import json
from cloud_pricing.data.interface import FixedInstance
class AWSProcessor(FixedInstance):
aws_gpu_ram = {
'p3': ('V100', 16),
'p2': ('K80', 12),
'g4': ('T4', 16),
'g3': ('M60', 8)
}
aws_pricing_index_ohio_url = "https://pricing.us-east-1.amazonaws.com/offers/v1.0/aws/AmazonEC2/current/us-east-1/index.json"
include_cols = [
'instanceType', 'location', 'productFamily',
'instanceFamily', 'currentGeneration',
'physicalProcessor', 'clockSpeed', 'sku',
'storage', 'tenancy', 'operatingSystem',
'capacitystatus', 'vcpu', 'memory', 'gpu'
]
def __init__(self, table_name='aws_data.pkl'):
super().__init__(table_name)
def filter(self, *args, **kwargs):
if kwargs['spot']:
print("AWSProcessor currently doesn't support spot instances since they are updated live.")
return
return super().filter(*args, **kwargs)
def setup(self):
print("Downloading latest AWS data...")
# Download latest pricing data
data_name = 'ohio-ec2.json'
self.download_data(self.aws_pricing_index_ohio_url, data_name)
        with open(data_name, 'r') as f:
raw_aws_data=json.load(f)
# Create products table
data = []
for p in raw_aws_data['products'].values():
data.append({
'sku': p['sku'],
'productFamily': p['productFamily'],
**p['attributes']
})
products_df = pd.DataFrame(data).filter(self.include_cols).set_index('sku')
# Create pricing table
on_demand = raw_aws_data['terms']['OnDemand']
pricing_data = []
all_skus = set()
for sku,v in on_demand.items():
for offer in v.values():
for dim in offer['priceDimensions'].values():
if sku in all_skus: print("Duplicate SKU", sku)
else: all_skus.add(sku)
pricing_data.append({
'sku': sku,
'Price ($/hr)': dim['pricePerUnit']['USD'] if 'USD' in dim['pricePerUnit'] else dim['pricePerUnit']
})
pricing_df = | pd.DataFrame(pricing_data) | pandas.DataFrame |
import json
import pandas as pd
def get_dialog(script):
    # renamed from get_char_lines to avoid clashing with the function of the same name below
    dialog_json = script["dialog"]
    dialog = pd.json_normalize(dialog_json)
    return dialog
def get_char_names(script):
char_names = script["characters"]
return char_names
def get_char_lines(char_names, dialog):
char_lines = list()
for i in range(len(char_names)):
temp = dialog.loc[dialog['character'] == char_names[i]]
char_lines.append(temp)
return char_lines
def get_lines_array(char_lines):
lines = | pd.concat(char_lines, axis=0) | pandas.concat |
from __future__ import division
import numpy as np
import os.path
import sys
import pandas as pd
from base.uber_model import UberModel, ModelSharedInputs
from .therps_functions import TherpsFunctions
import time
from functools import wraps
def timefn(fn):
@wraps(fn)
def measure_time(*args, **kwargs):
t1 = time.time()
result = fn(*args, **kwargs)
t2 = time.time()
print("therps_model_rest.py@timefn: " + fn.func_name + " took " + "{:.6f}".format(t2 - t1) + " seconds")
return result
return measure_time
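# Usage sketch for the decorator above (run_simulation is a hypothetical
# function, not part of this module): each decorated call prints its
# wall-clock duration as measured with time.time().
#
#     @timefn
#     def run_simulation(inputs):
#         ...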
class TherpsInputs(ModelSharedInputs):
"""
Input class for Therps.
"""
def __init__(self):
"""Class representing the inputs for Therps"""
super(TherpsInputs, self).__init__()
# Inputs: Assign object attribute variables from the input Pandas DataFrame
"""
Therps constructor.
:param chem_name:
:param use:
:param formu_name:
:param percent_act_ing:
:param foliar_diss_hlife:
:param num_apps:
:param app_interval:
:param application_rate:
:param ld50_bird:
:param lc50_bird:
:param noaec_bird:
:param noael_bird:
:param species_of_the_tested_bird_avian_ld50:
:param species_of_the_tested_bird_avian_lc50:
:param species_of_the_tested_bird_avian_noaec:
:param species_of_the_tested_bird_avian_noael:
:param tw_bird_ld50:
:param tw_bird_lc50:
:param tw_bird_noaec:
:param tw_bird_noael:
:param mineau_sca_fact:
:param aw_herp_sm:
:param aw_herp_md:
:param aw_herp_slg:
:param awc_herp_sm:
:param awc_herp_md:
:param awc_herp_lg:
:param bw_frog_prey_mamm:
:param bw_frog_prey_herp:
:return:
"""
self.use = pd.Series([], dtype="object", name="use")
self.formu_name = pd.Series([], dtype="object", name="formu_name")
self.percent_act_ing = pd.Series([], dtype="float", name="percent_act_ing")
self.foliar_diss_hlife = pd.Series([], dtype="float64", name="foliar_diss_hlife")
self.num_apps = pd.Series([], dtype="int64", name="num_apps")
self.app_interval = pd.Series([], dtype="int", name="app_interval")
self.application_rate = pd.Series([], dtype="float", name="application_rate")
self.ld50_bird = pd.Series([], dtype="float", name="ld50_bird")
self.lc50_bird = pd.Series([], dtype="float", name="lc50_bird")
self.noaec_bird = pd.Series([], dtype="float", name="noaec_bird")
self.noael_bird = pd.Series([], dtype="float", name="noael_bird")
self.species_of_the_tested_bird_avian_ld50 = pd.Series([], dtype="float",
name="species_of_the_tested_bird_avian_ld50")
self.species_of_the_tested_bird_avian_lc50 = pd.Series([], dtype="float",
name="species_of_the_tested_bird_avian_lc50")
self.species_of_the_tested_bird_avian_noaec = pd.Series([], dtype="float",
name="species_of_the_tested_bird_avian_noaec")
self.species_of_the_tested_bird_avian_noael = pd.Series([], dtype="float",
name="species_of_the_tested_bird_avian_noael")
self.tw_bird_ld50 = pd.Series([], dtype="float", name="tw_bird_ld50")
self.tw_bird_lc50 = pd.Series([], dtype="float", name="tw_bird_lc50")
self.tw_bird_noaec = pd.Series([], dtype="float", name="tw_bird_noaec")
self.tw_bird_noael = pd.Series([], dtype="float", name="tw_bird_noael")
self.mineau_sca_fact = pd.Series([], dtype="float", name="mineau_sca_fact")
self.aw_herp_sm = pd.Series([], dtype="float", name="aw_herp_sm")
self.aw_herp_md = pd.Series([], dtype="float", name="aw_herp_md")
self.aw_herp_lg = pd.Series([], dtype="float", name="aw_herp_lg")
self.awc_herp_sm = pd.Series([], dtype="float", name="awc_herp_sm")
self.awc_herp_md = pd.Series([], dtype="float", name="awc_herp_md")
self.awc_herp_lg = pd.Series([], dtype="float", name="awc_herp_lg")
self.bw_frog_prey_mamm = pd.Series([], dtype="float", name="bw_frog_prey_mamm")
self.bw_frog_prey_herp = pd.Series([], dtype="float", name="bw_frog_prey_herp")
## application rates and days of applications
#self.app_rates = pd.Series([], dtype="object") #Series of lists, each list contains app_rates of a model simulation run
        #self.day_out = pd.Series([], dtype="object") #Series of lists, each list contains day #'s of applications within a model simulation run
class TherpsOutputs(object):
"""
Output class for Therps.
"""
def __init__(self):
"""Class representing the outputs for Therps"""
super(TherpsOutputs, self).__init__()
## application rates and days of applications
#self.day_out = pd.Series([], dtype='object', name='day_out')
#self.app_rates = pd.Series([], dtype='object', name='app_rates')
# TODO: Add these back in after deciding how to handle the numpy arrays
# timeseries of concentrations related to herbiferous food sources
# self.out_c_ts_sg = pd.Series([], dtype='float') # short grass
# self.out_c_ts_blp = pd.Series([], dtype='float') # broad-leafed plants
# self.out_c_ts_fp = pd.Series([], dtype='float') # fruits/pods
#
# self.out_c_ts_mean_sg = pd.Series([], dtype='float') # short grass
# self.out_c_ts_mean_blp = pd.Series([], dtype='float') # broad-leafed plants
# self.out_c_ts_mean_fp = pd.Series([], dtype='float') # fruits/pods
# Table 5
self.out_ld50_ad_sm = pd.Series([], dtype='float', name="out_ld50_ad_sm")
self.out_ld50_ad_md = pd.Series([], dtype='float', name="out_ld50_ad_md")
self.out_ld50_ad_lg = pd.Series([], dtype='float', name="out_ld50_ad_lg")
self.out_eec_dose_bp_sm = pd.Series([], dtype='float', name="out_eec_dose_bp_sm")
self.out_eec_dose_bp_md = pd.Series([], dtype='float', name="out_eec_dose_bp_md")
self.out_eec_dose_bp_lg = pd.Series([], dtype='float', name="out_eec_dose_bp_lg")
self.out_arq_dose_bp_sm = pd.Series([], dtype='float', name="out_arq_dose_bp_sm")
self.out_arq_dose_bp_md = pd.Series([], dtype='float', name="out_arq_dose_bp_md")
self.out_arq_dose_bp_lg = pd.Series([], dtype='float', name="out_arq_dose_bp_lg")
self.out_eec_dose_fr_sm = pd.Series([], dtype='float', name="out_eec_dose_fr_sm")
self.out_eec_dose_fr_md = pd.Series([], dtype='float', name="out_eec_dose_fr_md")
self.out_eec_dose_fr_lg = pd.Series([], dtype='float', name="out_eec_dose_fr_lg")
self.out_arq_dose_fr_sm = pd.Series([], dtype='float', name="out_arq_dose_fr_sm")
self.out_arq_dose_fr_md = pd.Series([], dtype='float', name="out_arq_dose_fr_md")
self.out_arq_dose_fr_lg = pd.Series([], dtype='float', name="out_arq_dose_fr_lg")
self.out_eec_dose_hm_md = pd.Series([], dtype='float', name="out_eec_dose_hm_md")
self.out_eec_dose_hm_lg = pd.Series([], dtype='float', name="out_eec_dose_hm_lg")
self.out_arq_dose_hm_md = pd.Series([], dtype='float', name="out_arq_dose_hm_md")
self.out_arq_dose_hm_lg = pd.Series([], dtype='float', name="out_arq_dose_hm_lg")
self.out_eec_dose_im_md = pd.Series([], dtype='float', name="out_eec_dose_im_md")
self.out_eec_dose_im_lg = pd.Series([], dtype='float', name="out_eec_dose_im_lg")
self.out_arq_dose_im_md = pd.Series([], dtype='float', name="out_arq_dose_im_md")
self.out_arq_dose_im_lg = pd.Series([], dtype='float', name="out_arq_dose_im_lg")
self.out_eec_dose_tp_md = pd.Series([], dtype='float', name="out_eec_dose_tp_md")
self.out_eec_dose_tp_lg = pd.Series([], dtype='float', name="out_eec_dose_tp_lg")
self.out_arq_dose_tp_md = pd.Series([], dtype='float', name="out_arq_dose_tp_md")
self.out_arq_dose_tp_lg = pd.Series([], dtype='float', name="out_arq_dose_tp_lg")
# Table 6
self.out_eec_diet_herp_bl = pd.Series([], dtype='float', name="out_eec_diet_herp_bl")
self.out_eec_arq_herp_bl = pd.Series([], dtype='float', name="out_eec_arq_herp_bl")
self.out_eec_diet_herp_fr = pd.Series([], dtype='float', name="out_eec_diet_herp_fr")
self.out_eec_arq_herp_fr = pd.Series([], dtype='float', name="out_eec_arq_herp_fr")
self.out_eec_diet_herp_hm = pd.Series([], dtype='float', name="out_eec_diet_herp_hm")
self.out_eec_arq_herp_hm = | pd.Series([], dtype='float', name="out_eec_arq_herp_hm") | pandas.Series |
import re
from copy import copy
from typing import Iterable, Optional, Union
import pandas as pd
import requests
from bs4 import BeautifulSoup
from pvoutput.consts import (
MAP_URL,
PV_OUTPUT_COUNTRY_CODES,
PV_OUTPUT_MAP_COLUMN_NAMES,
REGIONS_URL,
)
_MAX_NUM_PAGES = 1024
def get_pv_systems_for_country(
country: Union[str, int],
ascending: Optional[bool] = None,
sort_by: Optional[str] = None,
max_pages: int = _MAX_NUM_PAGES,
region: Optional[str] = None,
) -> pd.DataFrame:
"""
Args:
country: either a string such as 'United Kingdom'
(see consts.PV_OUTPUT_COUNTRY_CODES for all recognised strings),
or a PVOutput.org country code, in the range [1, 257].
ascending: if True, ask PVOutput.org to sort results by ascending.
If False, sort by descending. If None, use PVOutput.org's default
sort order.
sort_by: The column to ask PVOutput.org to sort by. One of:
timeseries_duration,
average_generation_per_day,
efficiency,
power_generation,
capacity,
address,
name
        max_pages: The maximum number of search pages to scrape.
        region: optionally restrict the search to a single region name.
            If None, every region returned by get_regions_for_country
            for the country is scraped.
    Returns: pd.DataFrame with index system_id (int) and these columns:
        name, system_DC_capacity_W, panel, inverter, address, orientation,
        array_tilt_degrees, shade, timeseries_duration,
        total_energy_gen_Wh, average_daily_energy_gen_Wh,
        average_efficiency_kWh_per_kW, region
"""
country_code = _convert_to_country_code(country)
regions = [region] if region else get_regions_for_country(country_code)
all_metadata = []
for region in regions:
for page_number in range(max_pages):
print(
"\rReading page {:2d} for region: {}".format(page_number, region),
end="",
flush=True,
)
url = _create_map_url(
country_code=country_code,
page_number=page_number,
ascending=ascending,
sort_by=sort_by,
region=region,
)
soup = get_soup(url)
if _page_is_blank(soup):
break
metadata = _process_metadata(soup)
metadata["region"] = region
all_metadata.append(metadata)
if not _page_has_next_link(soup):
break
return pd.concat(all_metadata)
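# Minimal usage sketch (assumes live access to PVOutput.org; the country string
# and sort key are illustrative examples of the accepted values listed above):
#
#     metadata = get_pv_systems_for_country(
#         "United Kingdom", sort_by="capacity", ascending=False, max_pages=2)
#     print(metadata[["name", "system_DC_capacity_W", "region"]].head())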
############ LOAD HTML ###################
def _create_map_url(
country_code: Optional[int] = None,
page_number: Optional[int] = None,
ascending: Optional[bool] = None,
sort_by: Optional[str] = None,
region: Optional[str] = None,
) -> str:
"""
Args:
page_number: Get this page number of the search results. Zero-indexed.
The first page is page 0, the second page is page 1, etc.
"""
_check_country_code(country_code)
if ascending is None:
sort_order = None
else:
sort_order = "asc" if ascending else "desc"
if sort_by is None:
sort_by_pv_output_col_name = None
else:
try:
sort_by_pv_output_col_name = PV_OUTPUT_MAP_COLUMN_NAMES[sort_by]
except KeyError:
raise ValueError("sort_by must be one of {}".format(PV_OUTPUT_MAP_COLUMN_NAMES.keys()))
url_params = {
"country": country_code,
"p": page_number,
"d": sort_order,
"o": sort_by_pv_output_col_name,
"region": region,
}
url_params_list = [
"{}={}".format(key, value) for key, value in url_params.items() if value is not None
]
query_string = "&".join(url_params_list)
url = copy(MAP_URL)
if query_string:
url += "?" + query_string
return url
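# Example of the query assembly above (illustrative values; MAP_URL comes from
# pvoutput.consts). Parameters left as None are simply dropped:
#
#     _create_map_url(country_code=1, page_number=2, ascending=True)
#     # -> MAP_URL + "?country=1&p=2&d=asc"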
def _raise_country_error(country, msg=""):
country_codes = PV_OUTPUT_COUNTRY_CODES.values()
raise ValueError(
"Wrong value country='{}'. {}country must be an integer country"
" code in the range [{}, {}], or one of {}.".format(
country,
msg,
min(country_codes),
max(country_codes),
", ".join(PV_OUTPUT_COUNTRY_CODES.keys()),
)
)
def _check_country_code(country_code: Union[None, int]):
if country_code is None:
return
country_codes = PV_OUTPUT_COUNTRY_CODES.values()
if not min(country_codes) <= country_code <= max(country_codes):
_raise_country_error(country_code, "country outside of valid range! ")
def _convert_to_country_code(country: Union[str, int]) -> int:
if isinstance(country, str):
try:
return PV_OUTPUT_COUNTRY_CODES[country]
except KeyError:
_raise_country_error(country)
elif isinstance(country, int):
_check_country_code(country)
return country
def _page_has_next_link(soup: BeautifulSoup):
return bool(soup.find_all("a", text="Next"))
############# PROCESS HTML #########################
def _process_metadata(soup: BeautifulSoup, return_constituents=False) -> pd.DataFrame:
pv_system_size_metadata = _process_system_size_col(soup)
index = pv_system_size_metadata.index
pv_systems_metadata = [
pv_system_size_metadata,
_process_output_col(soup, index),
_process_generation_and_average_cols(soup, index),
_process_efficiency_col(soup, index),
]
df = pd.concat(pv_systems_metadata, axis="columns")
df = _convert_metadata_cols_to_numeric(df)
df["system_DC_capacity_W"] = df["capacity_kW"] * 1e3
del df["capacity_kW"]
if return_constituents:
pv_systems_metadata.append(df)
return tuple(pv_systems_metadata)
return df
def _process_system_size_col(soup: BeautifulSoup) -> pd.DataFrame:
    pv_system_size_col = soup.find_all("a", href=re.compile(r"display\.jsp\?sid="))
metadata = []
for row in pv_system_size_col:
metadata_for_row = {}
# Get system ID
href = row.attrs["href"]
p = re.compile("^display\.jsp\?sid=(\d+)$")
href_match = p.match(href)
metadata_for_row["system_id"] = href_match.group(1)
# Process title (lots of metadata in here!)
title, title_meta = row.attrs["title"].split("|")
# Name and capacity
p = re.compile("(.*) (\d+\.\d+kW)")
title_match = p.match(title)
metadata_for_row["name"] = title_match.group(1)
metadata_for_row["capacity"] = title_match.group(2)
# Other key-value pairs:
key_value = title_meta.split("<br/>")
key_value_dict = {}
for line in key_value:
key_value_split = line.split(":")
key = key_value_split[0].strip()
# Some values have a colon(!)
value = ":".join(key_value_split[1:]).strip()
key_value_dict[key] = value
metadata_for_row.update(key_value_dict)
# Some cleaning
# Remove <img ...> from Location
location = metadata_for_row["Location"]
p = re.compile("(<img .*\>)?(.*)")
img_groups = p.search(location).groups()
if img_groups[0] is not None:
metadata_for_row["Location"] = img_groups[1].strip()
metadata.append(metadata_for_row)
df = | pd.DataFrame(metadata) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 29 11:21:50 2020
@author: kaisa
"""
import os
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import cm
import matplotlib.dates as mdates
import seaborn as sns
from datetime import datetime, timedelta
import numpy as np
from bokeh.plotting import ColumnDataSource, figure, output_file, show, save
from bokeh.models import HoverTool
sns.set()
# ----------------- Import data ---------------------
# Covid-19 time series
covid_conf = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv')
covid_deaths = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv')
# transform data
def transform_covid_data(df):
df.drop(columns=['Province/State', 'Lat', 'Long'], inplace=True)
df = df.groupby('Country/Region').sum()
df = df.transpose()
df.index = [datetime.strptime(d, '%m/%d/%y') for d in df.index]
return df
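# After the transform each frame is indexed by date with one column per country,
# so e.g. covid_conf['Germany'] is that country's cumulative case series.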
covid_conf = transform_covid_data(covid_conf)
covid_deaths = transform_covid_data(covid_deaths)
# Country data
country_info = pd.read_csv(os.path.join('.', 'data', 'covid19countryinfo.csv'))
def convert_string(x):
try:
return np.float(x.replace(',',''))
except:
return np.nan
for c in ['pop', 'gdp2019', 'healthexp']:
country_info[c] = country_info[c].apply(convert_string)
# Restrictions
restrictions = pd.read_csv(os.path.join('.', 'data', 'restrictions.csv'),sep=';')
restrictions['date'] = pd.to_datetime(restrictions['date'], format='%d.%m.%Y')
# --------- Question 1: Duration till turning point ----------
def get_time_series(df, country, min_value):
s = df.loc[df[country]>=min_value, country]
s.index = np.array([datetime.timestamp(x) for x in s.index])/(3600*24)
s.index -= s.index[0]
return s
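# Example (illustrative): get_time_series(covid_conf, 'Germany', 100) returns the
# cumulative cases re-indexed as days elapsed since the first day with >= 100
# cases, starting at 0.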
"""
countries = ['China', 'Korea, South', 'Italy', 'Germany', 'US']
for i, c in enumerate(countries):
fig, ax = plt.subplots(nrows=1, ncols=2, sharex=True, figsize=(10,4.5))
s = get_time_series(covid_conf, c, 100)
color = cm.viridis(100)
ax[0].plot(s, color=color, label='Cases (total)')
ax[0].tick_params(axis='y', labelcolor=color)
ax[0].set_xlabel('Days since 100 cases')
ax2 = ax[0].twinx()
color = cm.viridis(150)
ax2.plot(s.index, np.gradient(s, s.index), color=color, label='Cases per day')
ax2.tick_params(axis='y', labelcolor=color)
lines, labels = ax[0].get_legend_handles_labels()
lines2, labels2 = ax2.get_legend_handles_labels()
ax2.legend(lines + lines2, labels + labels2, loc='upper left')
ax2.grid(None)
s = get_time_series(covid_deaths, c, 10)
color = cm.viridis(100)
ax[1].plot(s, color=color, label='Deaths (total)')
ax[1].tick_params(axis='y', labelcolor=color)
ax[1].set_xlabel('Days since 10 deaths')
ax2 = ax[1].twinx()
color = cm.viridis(150)
ax2.plot(s.index, np.gradient(s, s.index), color=color, label='Deaths per day')
ax2.tick_params(axis='y', labelcolor=color)
lines, labels = ax[1].get_legend_handles_labels()
lines2, labels2 = ax2.get_legend_handles_labels()
ax2.legend(lines + lines2, labels + labels2, loc='upper left')
ax2.grid(None)
plt.suptitle(c)
fig.tight_layout()
plt.subplots_adjust(top=0.92)
plt.savefig(os.path.join('D:\\Datensicherung\\Projekte\\Udacity_DataScience\\diagramme',c+'_series'), dpi=200)
"""
# ---------- Question 2: Effect of restrictions ------------
def add_annotations(ax, df, s):
last_y = 0
df.reset_index(drop=True, inplace=True)
for i, row in df.iterrows():
y = s.iloc[s.index.get_loc(row.date, method='nearest')]
x_text = row.date - timedelta(days=10)
y_text = y + s.max()/10
y_text = max(y_text, last_y+s.max()/12)
last_y = y_text
ann = ax.annotate(str(i+1),
xy=(row.date, y), xycoords='data',
xytext=(x_text, y_text), textcoords='data',
size=15, va="center", ha="center",
bbox=dict(boxstyle="round4", fc="w"),
arrowprops=dict(arrowstyle="-|>",
connectionstyle="arc3,rad=-0.2",
fc="k", color='k'),
)
plt.text(1.02, 0.92-i*0.06, '{:d}: {}'.format(i+1,row.text), horizontalalignment='left',
verticalalignment='top', transform=ax.transAxes,
fontsize=11)
plt.text(1.02, 1, 'Restrictions / Actions:', horizontalalignment='left',
verticalalignment='top', transform=ax.transAxes,
fontsize=13, fontweight='bold')
countries = ['Korea, South', 'Italy', 'Germany']
for i, c in enumerate(countries):
fig, ax = plt.subplots(figsize=(9,4))
s = covid_conf[c]
plt.plot(s)
ax.set_xlim((s.idxmin(),s.idxmax()+timedelta(days=5)))
myFmt = mdates.DateFormatter('%m-%d')
ax.xaxis.set_major_formatter(myFmt)
ax.set_ylabel('Confirmed cases (total)')
fig.tight_layout()
plt.subplots_adjust(right=0.6, top=0.93)
plt.suptitle(c)
add_annotations(ax, restrictions.loc[restrictions.country_region==c], s)
plt.savefig(os.path.join('D:\\Datensicherung\\Projekte\\Udacity_DataScience\\diagramme',c+'_measures'), dpi=200)
# ---------- Question 3: Correlation with death/cases ratio ------------
from collections import defaultdict
from bokeh.palettes import Viridis
ratio = defaultdict(list)
df_death_ratio = []
country_info['death_ratio'] = np.nan
for c in covid_conf.columns:
df = pd.concat([ | pd.Series(covid_conf[c], name='Cases') | pandas.Series |
# Mar21, 2022
##
#---------------------------------------------------------------------
# SERVER only input all files (.bam and .fa) output MeH matrix in .csv
# August 3, 2021 clean
# FINAL github
#---------------------------------------------------------------------
import random
import math
import pysam
import csv
import sys
import os
import pandas as pd
import numpy as np
import datetime
import time as t
from collections import Counter, defaultdict, OrderedDict
#---------------------------------------
# Functions definition
#---------------------------------------
def open_log(fname):
open_log.logfile = open(fname, 'w', 1)
def logm(message):
log_message = "[%s] %s\n" % (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), message)
print(log_message),
open_log.logfile.write(log_message)
def close_log():
open_log.logfile.close()
# Count # of windows with enough reads for complete/impute
def coverage(methbin,complete,w):
count=0
tot = 0
meth=methbin.iloc[:,methbin.columns!='Qname']
if len(meth.columns)>=w:
for i in range(len(meth.columns)-w+1):
# extract a window
temp = meth.iloc[:,i:i+w].copy()
#print(temp)
tot = tot+1
if (enough_reads(window=temp,complete=complete,w=w)):
count=count+1
#toprint=temp.notnull().sum(axis=1)>=w
#print(toprint.sum())
#print(count)
#print(tot)
return count/tot*100
else:
return 0
# Check whether a window has enough reads for complete/impute
def enough_reads(window,w,complete):
temp=np.isnan(window).sum(axis=1)==0
if complete: # For heterogeneity estimation
return temp.sum()>=2**w
else: # for imputation
tempw1=np.isnan(window).sum(axis=1)==1
return temp.sum()>=2**(w-2) and tempw1.sum()>0
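# Worked example of the eligibility rules above (numbers follow directly from the
# thresholds): for w=4 a window qualifies for MeH evaluation (complete=True) once
# at least 2**4 = 16 reads cover all four sites without NaN, while imputation
# (complete=False) needs only 2**2 = 4 fully observed reads plus at least one
# read missing exactly one site.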
def impute(window,w):
full_ind=np.where(np.isnan(window).sum(axis=1)==0)[0]
part_ind=np.where(np.isnan(window).sum(axis=1)==1)[0]
for i in range(len(part_ind)):
sam = []
# which column is nan
pos=np.where(np.isnan(window[part_ind[i],:]))[0]
if np.unique(window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos]).shape[0]==1:
window[part_ind[i],pos]=window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos][0]
else:
#print("win_part i pos =",window[part_ind[i],pos])
for j in range(len(full_ind)):
if (window[part_ind[i],:]==window[full_ind[j],:]).sum()==w-1:
sam.append(j)
if len(sam)>0:
s1=random.sample(sam, 1)
s=window[full_ind[s1],pos]
else:
s=random.sample(window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos].tolist(), k=1)[0]
window[part_ind[i],pos]=np.float64(s)
#print("win_part i =",window[part_ind[i],pos])
#print("s = ",np.float64(s))
return window
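# Sketch of the imputation rule above: for a read missing exactly one site, if
# every observed read agrees at that site the consensus value is used; otherwise
# a fully observed read matching the remaining w-1 sites is sampled, falling back
# to a random draw from the observed statuses at that site.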
def getcomplete(window,w):
temp=np.isnan(window).sum(axis=1)==0
mat=window[np.where(temp)[0],:]
#temp=window.notnull().sum(axis=1)>=w
#mat=window.iloc[np.where(temp)[0],:]
#else:
# temp=mat.notnull().sum(axis=1)>=w-1
return mat
def PattoDis(mat,dist=1):
s=mat.shape[0]
dis=np.zeros((s,s))
for i in range(s):
for j in range(s):
if j<i:
if dist==1:
d=Ham_d(mat.iloc[i,],mat.iloc[j,])
else:
d=WDK_d(mat.iloc[i,],mat.iloc[j,])
dis[i,j]=dis[j,i]=d
return dis
def Ham_d(pat1,pat2):
return (pat1!=pat2).sum()
def WDK_d(pat1,pat2):
d=0
w=pat1.shape[0]
for i in range(w): # k-1
for j in range(w-i): # starting pos
s=(w-i-1)*(1-np.all(pat1[j:j+i+1]==pat2[j:j+i+1]))
d+=s
return d
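# Worked example for the two distances (patterns are illustrative): with
# pat1 = [0,1,1] and pat2 = [0,0,1], Ham_d counts the single mismatching site
# (distance 1), while WDK_d also weights shared contiguous sub-patterns:
# 2 from the site level plus 1 + 1 from the two length-2 subwords, giving 4.
# PattoDis applies the chosen distance to every pair of patterns to build the
# symmetric matrix D used by MeHperwindow.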
# input a window of w CGs and output a list of pattern proportions, together with the starting genomic location and the genomic distance spanned by the window
def window_summ(pat,start,dis,chrom):
m=np.shape(pat)[0]
d=np.shape(pat)[1]
all_pos=np.zeros((2**d,d))
for i in range(d):
all_pos[:,i]=np.linspace(0,2**d-1,2**d)%(2**(i+1))//(2**i)
#print(all_pos)
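    # The loop above enumerates every binary methylation pattern of length d,
    # least-significant site first; e.g. for d=2 the rows of all_pos are
    # [0,0], [1,0], [0,1], [1,1].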
prob=np.zeros((2**d,1))
#print(prob)
for i in range(2**d):
count = 0
for j in range(m):
if (all_pos[i,:]==pat.iloc[j,:]).sum()==d:
count += 1
#print(count)
prob[i]=count
if d==3:
out=pd.DataFrame({'chrom':chrom,'pos':start,'p01':prob[0],'p02':prob[1],'p03':prob[2],'p04':prob[3],\
'p05':prob[4],'p06':prob[5],'p07':prob[6],'p08':prob[7],'dis':dis})
if d==4:
out=pd.DataFrame({'chrom':chrom,'pos':start,'p01':prob[0],'p02':prob[1],'p03':prob[2],'p04':prob[3],\
'p05':prob[4],'p06':prob[5],'p07':prob[6],'p08':prob[7],'p09':prob[8],'p10':prob[9],\
'p11':prob[10],'p12':prob[11],'p13':prob[12],'p14':prob[13],'p15':prob[14],\
'p16':prob[15],'dis':dis})
if d==5:
out=pd.DataFrame({'chrom':chrom,'pos':start,'p01':prob[0],'p02':prob[1],'p03':prob[2],'p04':prob[3],\
'p05':prob[4],'p06':prob[5],'p07':prob[6],'p08':prob[7],'p09':prob[8],'p10':prob[9],\
'p11':prob[10],'p12':prob[11],'p13':prob[12],'p14':prob[13],'p15':prob[14],\
'p16':prob[15],'p17':prob[16],'p18':prob[17],'p19':prob[18],'p20':prob[19],\
'p21':prob[20],'p22':prob[21],'p23':prob[22],'p24':prob[23],'p25':prob[24],\
'p26':prob[25],'p27':prob[26],'p28':prob[27],'p29':prob[28],'p30':prob[29],\
'p31':prob[30],'p32':prob[31],'dis':dis})
if d==6:
out=pd.DataFrame({'chrom':chrom,'pos':start,'p01':prob[0],'p02':prob[1],'p03':prob[2],'p04':prob[3],\
'p05':prob[4],'p06':prob[5],'p07':prob[6],'p08':prob[7],'p09':prob[8],'p10':prob[9],\
'p11':prob[10],'p12':prob[11],'p13':prob[12],'p14':prob[13],'p15':prob[14],\
'p16':prob[15],'p17':prob[16],'p18':prob[17],'p19':prob[18],'p20':prob[19],\
'p21':prob[20],'p22':prob[21],'p23':prob[22],'p24':prob[23],'p25':prob[24],\
'p26':prob[25],'p27':prob[26],'p28':prob[27],'p29':prob[28],'p30':prob[29],\
'p31':prob[30],'p32':prob[31],'p33':prob[32],'p34':prob[33],'p35':prob[34],\
'p36':prob[35],'p37':prob[36],'p38':prob[37],'p39':prob[38],'p40':prob[39],\
'p41':prob[40],'p42':prob[41],'p43':prob[42],'p44':prob[43],'p45':prob[44],\
'p46':prob[45],'p47':prob[46],'p48':prob[47],'p49':prob[48],'p50':prob[49],\
'p51':prob[50],'p52':prob[51],'p53':prob[52],'p54':prob[53],'p55':prob[54],\
'p56':prob[55],'p57':prob[56],'p58':prob[57],'p59':prob[58],'p60':prob[59],\
'p61':prob[60],'p62':prob[61],'p63':prob[62],'p64':prob[63],'dis':dis})
return out
def MeHperwindow(pat,start,dis,chrom,D,w,optional,MeH=2,dist=1,strand='f'):
count=np.zeros((2**w,1))
m=np.shape(pat)[0]
pat=np.array(pat)
if w==2:
pat = Counter([str(i[0])+str(i[1]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['00','10','01','11']])
if w==3:
pat = Counter([str(i[0])+str(i[1])+str(i[2]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['000','100','010','110','001','101','011','111']])
if w==4:
pat = Counter([str(i[0])+str(i[1])+str(i[2])+str(i[3]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['0000','1000','0100','1100','0010','1010','0110','1110','0001',\
'1001','0101','1101','0011','1011','0111','1111']])
if w==5:
pat = Counter([str(i[0])+str(i[1])+str(i[2])+str(i[3])+str(i[4]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['00000','10000','01000','11000','00100','10100','01100','11100','00010',\
'10010','01010','11010','00110','10110','01110','11110','00001','10001','01001','11001','00101',\
'10101','01101','11101','00011','10011','01011','11011','00111','10111','01111','11111']])
if w==6:
pat = Counter([str(i[0])+str(i[1])+str(i[2])+str(i[3])+str(i[4])+str(i[5]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['000000','100000','010000','110000','001000','101000','011000','111000','000100',\
'100100','010100','110100','001100','101100','011100','111100','000010','100010','010010','110010','001010',\
'101010','011010','111010','000110', '100110','010110','110110','001110','101110','011110','111110',\
'000001','100001','010001','110001','001001','101001','011001','111001','000101',\
'100101','010101','110101','001101','101101','011101','111101','000011','100011','010011','110011','001011',\
'101011','011011','111011','000111', '100111','010111','110111','001111','101111','011111','111111']])
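    # The branches below turn the pattern counts into a heterogeneity score
    # (with p_i = count_i / m): MeH==1 is the inverse Simpson index
    # 1/sum(p_i^2); MeH==2 combines pairwise pattern distances from D
    # (a quadratic-entropy-style score); MeH==3 uses the phylogeny-based
    # weights defined inline; MeH==4 is Shannon entropy -sum(p_i*log2 p_i)/w;
    # MeH==5 is epipolymorphism 1 - sum(p_i^2).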
if MeH==1: # Abundance based
score=(((count/m)**2).sum(axis=0))**(-1)
elif MeH==2: # PWS based
interaction=np.multiply.outer(count/m,count/m).reshape((2**w,2**w))
Q=sum(sum(D*interaction))
#print("Q =",Q)
if Q==0:
score=0
else:
score=(sum(sum(D*(interaction**2)))/(Q**2))**(-0.5)
elif MeH==3: #Phylogeny based
count=count.reshape(2**w)
count=np.concatenate((count[[0]],count))
if dist==1 and w==4:
phylotree=np.append(np.append(np.append(np.append([0],np.repeat(0.5,16)),np.repeat(0.25,6)),[0.5]),np.repeat(0.25,6))
#phylotree=np.repeat(0,1).append(np.repeat(0.5,16)).append(np.repeat(0.25,6)).append(0.5).append(np.repeat(0.25,6))
countn=np.zeros(30)
#count<-rep(0,29)
countn[1:17]=count[[1,9,5,3,2,13,11,10,7,6,4,15,14,12,8,16]]
countn[17]=countn[4]+countn[7]
countn[18]=countn[9]+countn[12]
countn[19]=countn[1]+countn[2]
countn[20]=countn[3]+countn[6]
countn[21]=countn[17]+countn[18]
countn[22]=countn[19]+countn[20]
countn[23]=countn[21]+countn[22]
countn[24]=countn[5]+countn[8]
countn[25]=countn[10]+countn[13]
countn[26]=countn[24]+countn[25]
countn[27]=countn[23]+countn[26]
countn[28]=countn[11]+countn[14]
countn[29]=countn[27]+countn[28]
#Q=sum(sum(phylotree*count))
if dist==2 and w==4:
            # branch lengths per the R-style comment below; np.append cannot take
            # multiple value arguments, so build the vector with np.concatenate
            phylotree=np.concatenate(([0],np.repeat(3,16),np.repeat(1.5,6),[3.2,0.8],np.repeat(2,3),np.repeat(1.5,2)))
#phylotree=c(rep(3,16),rep(1.5,6),3.2,0.8,rep(2,3),1.5,1.5)
countn=np.zeros(30)
#print(count)
countn[1:17]=count[[1,9,5,3,2,13,11,10,7,6,4,15,14,12,8,16]]
countn[17]=countn[1]+countn[2]
countn[18]=countn[5]+countn[8]
countn[19]=countn[3]+countn[6]
countn[20]=countn[10]+countn[13]
countn[21]=countn[4]+countn[7]
countn[22]=countn[11]+countn[14]
countn[23]=countn[17]+countn[18]
countn[24]=countn[21]+countn[22]
countn[25]=countn[19]+countn[20]
countn[26]=countn[23]+countn[24]
countn[27]=countn[25]+countn[26]
countn[28]=countn[9]+countn[12]
countn[29]=countn[27]+countn[28]
#Q=sum(phylotree*count)
if dist==2 and w==3:
            # the last two branch lengths are 1.5 and 0.75 (np.repeat(1.5,0.75) is not valid)
            phylotree=np.concatenate(([0],np.repeat(1.5,8),np.repeat(0.75,3),[1.5,0.75]))
#phylotree=np.array(0).append(np.repeat(1.5,8)).append(np.repeat(0.75,3)).append(1.5,0.75)
#phylotree=c(rep(1.5,8),rep(0.75,3),1.5,0.75)
countn=np.zeros(14)
countn[1:9]=count[1:9]
countn[9]=countn[1]+countn[2]
countn[10]=countn[5]+countn[6]
countn[11]=countn[3]+countn[4]
countn[12]=countn[9]+countn[10]
countn[13]=countn[11]+countn[12]
#Q=sum(phylotree*count)
if dist==1 and w==3:
phylotree=np.append(np.append(np.append([0],np.repeat(0.5,8)),np.repeat(0.25,3)),[0.5,0.25])
#phylotree=np.array(0).append(np.repeat(0.5,8)).append(np.repeat(0.25,3)).append(0.5,0.25)
countn=np.zeros(14)
countn[1:9]=count[1:9]
countn[9]=countn[1]+countn[2]
countn[10]=countn[5]+countn[6]
countn[11]=countn[3]+countn[4]
countn[12]=countn[9]+countn[10]
countn[13]=countn[11]+countn[12]
#print("count = ",count)
#print("phylotree = ",phylotree)
Q=sum(phylotree*countn)
score=sum(phylotree*((countn/Q)**2))**(-1)
elif MeH==4: #Entropy
score=0
for i in count:
if i>0:
score-=(i/m)*np.log2(i/m)/w
elif MeH==5: #Epipoly
score=1-((count/m)**2).sum(axis=0)
if optional:
if MeH!=3:
count=count.reshape(2**w)
count=np.concatenate((count[[0]],count))
if w==3:
opt=pd.DataFrame({'chrom':chrom,'pos':start,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'MeH':round(score,5),'dis':dis,'strand':strand}, index=[0])
if w==4:
opt=pd.DataFrame({'chrom':chrom,'pos':start,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'p09':count[9],'p10':count[10],\
'p11':count[11],'p12':count[12],'p13':count[13],'p14':count[14],'p15':count[15],\
'p16':count[16],'MeH':round(score,5),'dis':dis,'strand':strand}, index=[0])
if w==5:
opt=pd.DataFrame({'chrom':chrom,'pos':start,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'p09':count[9],'p10':count[10],\
'p11':count[11],'p12':count[12],'p13':count[13],'p14':count[14],'p15':count[15],\
'p16':count[16],'p17':count[17],'p18':count[18],'p19':count[19],'p20':count[20],\
'p21':count[21],'p22':count[22],'p23':count[23],'p24':count[24],'p25':count[25],\
'p26':count[26],'p27':count[27],'p28':count[28],'p29':count[29],'p30':count[30],\
'p31':count[31],'p32':count[32],'MeH':round(score,5),'dis':dis,'strand':strand}, index=[0])
if w==6:
opt=pd.DataFrame({'chrom':chrom,'pos':start,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'p09':count[9],'p10':count[10],\
'p11':count[11],'p12':count[12],'p13':count[13],'p14':count[14],'p15':count[15],\
'p16':count[16],'p17':count[17],'p18':count[18],'p19':count[19],'p20':count[20],\
'p21':count[21],'p22':count[22],'p23':count[23],'p24':count[24],'p25':count[25],\
'p26':count[26],'p27':count[27],'p28':count[28],'p29':count[29],'p30':count[30],\
'p31':count[31],'p32':count[32],'p33':count[33],'p34':count[34],'p35':count[35],\
'p36':count[36],'p37':count[37],'p38':count[38],'p39':count[39],'p40':count[40],\
'p41':count[41],'p42':count[42],'p43':count[43],'p44':count[44],'p45':count[45],\
'p46':count[46],'p47':count[47],'p48':count[48],'p49':count[49],'p50':count[50],\
'p51':count[51],'p52':count[52],'p53':count[53],'p54':count[54],'p55':count[55],\
'p56':count[56],'p57':count[57],'p58':count[58],'p59':count[59],'p60':count[60],\
'p61':count[61],'p62':count[62],'p63':count[63],'p64':count[64],'MeH':round(score,5),'dis':dis,'strand':strand}, index=[0])
return out, opt
else:
out=pd.DataFrame({'chrom':chrom,'pos':start,'MeH':round(score,5),'dis':dis,'strand':strand}, index=[0])
return out
def impute(window,w):
full_ind=np.where(np.isnan(window).sum(axis=1)==0)[0]
part_ind=np.where(np.isnan(window).sum(axis=1)==1)[0]
for i in range(len(part_ind)):
sam = []
# which column is nan
pos=np.where(np.isnan(window[part_ind[i],:]))[0]
if np.unique(window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos]).shape[0]==1:
window[part_ind[i],pos]=window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos][0]
else:
#print("win_part i pos =",window[part_ind[i],pos])
for j in range(len(full_ind)):
if (window[part_ind[i],:]==window[full_ind[j],:]).sum()==w-1:
sam.append(j)
if len(sam)>0:
s1=random.sample(sam, 1)
s=window[full_ind[s1],pos]
else:
s=random.sample(window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos].tolist(), k=1)[0]
window[part_ind[i],pos]=np.float64(s)
return window
def CGgenome_scr(bamfile,w,fa,optional,melv,silence=False,dist=1,MeH=2,imp=True):
filename, file_extension = os.path.splitext(bamfile)
sample = str.split(filename,'_')[0]
coverage = cov_context = 0
# load bamfile
samfile = pysam.AlignmentFile("MeHdata/%s.bam" % (filename), "rb")
# load reference genome
fastafile = pysam.FastaFile('MeHdata/%s.fa' % fa)
# initialise data frame for genome screening (load C from bam file)
aggreR = aggreC = pd.DataFrame(columns=['Qname'])
# initialise data frame for output
ResultPW = pd.DataFrame(columns=['chrom','pos','MeH','dis','strand'])
if melv:
ResML = pd.DataFrame(columns=['chrom','pos','ML','strand','depth'])
# if user wants to output compositions of methylation patterns at every eligible window, initialise data frame
if optional:
if w==3:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08',\
'MeH','dis','strand'])
if w==4:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11',\
'p12','p13','p14','p15','p16','MeH','dis','strand'])
if w==5:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16'\
,'p17','p18','p19','p20','p21','p22','p23','p24','p25','p26','p27','p28',\
'p29','p30','p31','p32','MeH','dis','strand'])
        if w==6:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16'\
,'p17','p18','p19','p20','p21','p22','p23','p24','p25','p26','p27','p28',\
'p29','p30','p31','p32','p33','p34','p35','p36','p37','p38','p39','p40','p41','p42','p43','p44','p45','p46'\
,'p47','p48','p49','p50','p51','p52','p53','p54','p55','p56','p57','p58','p59','p60','p61','p62','p63','p64'\
,'MeH','dis','strand'])
neverr = never = True
# all methylation patterns for Methylation heterogeneity evaluation
all_pos=np.zeros((2**w,w))
for i in range(w):
all_pos[:,i]=np.linspace(0,2**w-1,2**w)%(2**(i+1))//(2**i)
# distance matrix, also for Methylation heterogeneity evaluation
D=PattoDis(pd.DataFrame(all_pos),dist=dist) # 1:Hamming distance, 2: WDK
start=datetime.datetime.now()
# vector for saving methylation statuses before imputation
MU=np.zeros((2,w))
# screen bamfile by column
for pileupcolumn in samfile.pileup():
coverage += 1
chrom = pileupcolumn.reference_name
if not silence:
if (pileupcolumn.pos % 2000000 == 1):
print("CG %s s %s w %s %s pos %s Result %s" % (datetime.datetime.now(),filename,w,chrom,pileupcolumn.pos,ResultPW.shape[0]))
# Forward strand, check if 'CG' in reference genome
if (fastafile.fetch(chrom,pileupcolumn.pos,pileupcolumn.pos+2)=='CG'):
cov_context += 1
temp = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
# append reads in the column
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and not pileupread.alignment.is_reverse: # C
d = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
df2 = pd.DataFrame(data=d)
temp=temp.append(df2, ignore_index=True)
if melv:
temp2 = temp.replace(['C'],1)
temp2 = temp2.replace(['G'],0)
temp2 = temp2.replace(['A','T','N'],np.nan)
temp2 = temp2.drop('Qname',axis=1)
MC=(temp2==1).sum(axis=0).to_numpy()
UC=(temp2==0).sum(axis=0).to_numpy()
depth=MC+UC
if depth>3:
toappend=pd.DataFrame({'chrom':chrom,'pos':temp2.columns[0], \
'strand':'f','depth':depth,'ML':float(MC)/depth}, index=[0])
ResML=ResML.append(toappend)
# merge with other columns
if (not temp.empty):
aggreC = pd.merge(aggreC,temp,how='outer',on=['Qname'])
aggreC = aggreC.drop_duplicates()
# Reverse strand, check if 'CG' in reference genome
if pileupcolumn.pos>1:
if (fastafile.fetch(chrom,pileupcolumn.pos-1,pileupcolumn.pos+1)=='CG'):
cov_context += 1
tempr = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and pileupread.alignment.is_reverse: # C
dr = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
dfr2 = pd.DataFrame(data=dr)
tempr=tempr.append(dfr2, ignore_index=True)
if melv:
temp2 = tempr.replace(['G'],1)
temp2 = temp2.replace(['C'],0)
temp2 = temp2.replace(['A','T','N'],np.nan)
temp2 = temp2.drop('Qname',axis=1)
MC=(temp2==1).sum(axis=0).to_numpy()
UC=(temp2==0).sum(axis=0).to_numpy()
depth=MC+UC
if depth>3:
toappend=pd.DataFrame({'chrom':chrom,'pos':temp2.columns[0], \
'strand':'r','depth':depth,'ML':float(MC)/depth}, index=[0])
ResML=ResML.append(toappend)
if (not tempr.empty):
aggreR = pd.merge(aggreR,tempr,how='outer',on=['Qname'])
aggreR = aggreR.drop_duplicates()
# Impute and estimate, if there are 2w-1 columns
if never and aggreC.shape[1] == (2*w):
# C/G to 1, rest to 0, N to NA
never = False
aggreC = aggreC.replace(['C'],1)
aggreC = aggreC.replace(['T'],0)
aggreC = aggreC.replace(['A','N','G'],np.nan)
methbin = aggreC
meth = methbin.copy()
# remove read ID
meth = meth.drop('Qname',axis=1)
# back up for imputation
if imp:
methtemp = meth.copy()
# imputation by sliding window of 1 C
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# save methylation statuses before imputation
# check if eligible for imputation, impute
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
# overwrite imputed window
meth = methtemp.copy()
# Evaluate methylation level and methylation heterogeneity and append to result
for i in range(0,w,1): # w windows
window = meth.iloc[:,range(i,i+w)].values
# check if enough complete patterns for evaluating MeH
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
# if need to output methylation patterns
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
Resultopt=Resultopt.append(opt)
# evaluate and output MeH
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
ResultPW=ResultPW.append(toappend)
# remove 1 column
aggreC = aggreC.drop(meth.columns[0:1],axis=1)
# drop rows with no values
aggreC.dropna(axis = 0, thresh=2, inplace = True)
#total += w
# Reverse
if neverr and aggreR.shape[1] == (2*w):
neverr = False
aggreR = aggreR.replace(['G'],1)
aggreR = aggreR.replace(['A'],0)
aggreR = aggreR.replace(['C','N','T'],np.nan)
methbin = aggreR # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
for i in range(0,w,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
ResultPW=ResultPW.append(toappend)
aggreR = aggreR.drop(meth.columns[0:1],axis=1)
aggreR.dropna(axis = 0, thresh=2, inplace = True)
#------------------
# SECONDARY CASE
#------------------
if (aggreC.shape[1] == (3*w-1)):
aggreC = aggreC.replace(['C'],1)
aggreC = aggreC.replace(['T'],0)
aggreC = aggreC.replace(['A','N','G'],np.nan)
methbin = aggreC # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(w-1,2*w-1,1):
#for i in range(0,meth.shape[1]-w+1,1):
#if i>w-2 and i<2*w:
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
ResultPW=ResultPW.append(toappend)
if ResultPW.shape[0] % 100000 == 1:
ResultPW.to_csv(r"MeHdata/CG_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CG_ML_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CG_opt_%s.csv"%(filename),index = False, header=True)
if not silence:
print("Checkpoint CG. For file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
aggreC = aggreC.drop(meth.columns[0:w],axis=1)
aggreC.dropna(axis = 0, thresh=2, inplace = True)
#print(aggreC)
#total += w
# reverse
if (aggreR.shape[1] == (3*w-1)):
aggreR = aggreR.replace(['G'],1)
aggreR = aggreR.replace(['A'],0)
aggreR = aggreR.replace(['C','N','T'],np.nan)
methbin = aggreR # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(w-1,2*w-1,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
ResultPW=ResultPW.append(toappend)
if ResultPW.shape[0] % 100000 == 1:
ResultPW.to_csv(r"MeHdata/CG_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CG_ML_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CG_opt_%s.csv"%(filename),index = False, header=True)
if not silence:
print("Checkpoint CG. For file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
aggreR = aggreR.drop(meth.columns[0:w],axis=1)
aggreR.dropna(axis = 0, thresh=2, inplace = True)
if ResultPW.shape[0]>0:
ResultPW.to_csv(r"MeHdata/CG_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CG_opt_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CG_ML_%s.csv"%(filename),index = False, header=True)
return sample, coverage, cov_context, 'CG'
print("Done CG for file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
#samfile.close()
def CHHgenome_scr(bamfile,w,fa,optional,melv,silence=False,dist=1,MeH=2,imp=True):
filename, file_extension = os.path.splitext(bamfile)
sample = str.split(filename,'_')[0]
coverage = cov_context = 0
#directory = "Outputs/" + str(sample) + '.csv' #original filename of .bams
samfile = pysam.AlignmentFile("MeHdata/%s.bam" % (filename), "rb")
fastafile = pysam.FastaFile('MeHdata/%s.fa' % fa)
aggreR = aggreC = pd.DataFrame(columns=['Qname'])
ResultPW = pd.DataFrame(columns=['chrom','pos','MeH','dis','strand'])
if melv:
ResML = pd.DataFrame(columns=['chrom','pos','ML','depth','strand'])
if optional:
if w==3:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08',\
'MeH','dis','strand'])
if w==4:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11',\
'p12','p13','p14','p15','p16','MeH','dis','strand'])
if w==5:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16'\
,'p17','p18','p19','p20','p21','p22','p23','p24','p25','p26','p27','p28',\
'p29','p30','p31','p32','MeH','dis','strand'])
        if w==6:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16'\
,'p17','p18','p19','p20','p21','p22','p23','p24','p25','p26','p27','p28',\
'p29','p30','p31','p32','p33','p34','p35','p36','p37','p38','p39','p40','p41','p42','p43','p44','p45','p46'\
,'p47','p48','p49','p50','p51','p52','p53','p54','p55','p56','p57','p58','p59','p60','p61','p62','p63','p64'\
,'MeH','dis','strand'])
neverr = never = True
#chr_lengths = fastafile.get_reference_length(chrom)
all_pos=np.zeros((2**w,w))
for i in range(w):
all_pos[:,i]=np.linspace(0,2**w-1,2**w)%(2**(i+1))//(2**i)
D=PattoDis(pd.DataFrame(all_pos),dist=dist) #1:Hamming distance
start=datetime.datetime.now()
MU=np.zeros((2,w))
for pileupcolumn in samfile.pileup():
coverage += 1
chrom = pileupcolumn.reference_name
if not silence:
if (pileupcolumn.pos % 2000000 == 1):
print("CHH %s s %s w %s %s pos %s Result %s" % (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),filename,w,chrom,pileupcolumn.pos,ResultPW.shape[0]))
# forward
if fastafile.fetch(chrom,pileupcolumn.pos,pileupcolumn.pos+1)=='C' and fastafile.fetch(chrom,pileupcolumn.pos+1,pileupcolumn.pos+2)!='G' and fastafile.fetch(chrom,pileupcolumn.pos+2,pileupcolumn.pos+3)!='G':
cov_context += 1
temp = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and not pileupread.alignment.is_reverse: # C
d = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
df2 = pd.DataFrame(data=d)
#df2.head()
temp=temp.append(df2, ignore_index=True)
#temp.head()
if melv:
temp2 = temp.replace(['C'],1)
temp2 = temp2.replace(['T'],0)
temp2 = temp2.replace(['A','G','N'],np.nan)
temp2 = temp2.drop('Qname',axis=1)
MC=(temp2==1).sum(axis=0).to_numpy()
UC=(temp2==0).sum(axis=0).to_numpy()
depth=MC+UC
if depth>3:
toappend=pd.DataFrame({'chrom':chrom,'pos':temp2.columns[0], \
'strand':'f','depth':depth,'ML':float(MC)/depth}, index=[0])
ResML=ResML.append(toappend)
if (not temp.empty):
#temp.head()
aggreC = pd.merge(aggreC,temp,how='outer',on=['Qname'])
aggreC = aggreC.drop_duplicates()
# reverse
if pileupcolumn.pos>2:
if fastafile.fetch(chrom,pileupcolumn.pos,pileupcolumn.pos+1)=='G' and fastafile.fetch(chrom,pileupcolumn.pos-1,pileupcolumn.pos)!='C' and fastafile.fetch(chrom,pileupcolumn.pos-2,pileupcolumn.pos-1)!='C':
cov_context += 1
tempr = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and pileupread.alignment.is_reverse: # C
d = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
df2 = pd.DataFrame(data=d)
#df2.head()
tempr=tempr.append(df2, ignore_index=True)
#temp.head()
if melv:
temp2 = tempr.replace(['G'],1)
temp2 = temp2.replace(['A'],0)
temp2 = temp2.replace(['C','T','N'],np.nan)
temp2 = temp2.drop('Qname',axis=1)
MC=(temp2==1).sum(axis=0).to_numpy()
UC=(temp2==0).sum(axis=0).to_numpy()
depth=MC+UC
if depth>3:
toappend=pd.DataFrame({'chrom':chrom,'pos':temp2.columns[0], \
'strand':'r','depth':depth,'ML':float(MC)/depth}, index=[0])
ResML=ResML.append(toappend)
if (not tempr.empty):
aggreR = pd.merge(aggreR,tempr,how='outer',on=['Qname'])
aggreR = aggreR.drop_duplicates()
if never and aggreC.shape[1] == (2*w):
never = False
aggreC = aggreC.replace(['C'],1)
aggreC = aggreC.replace(['T'],0)
aggreC = aggreC.replace(['A','N','G'],np.nan)
methbin = aggreC # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(0,w,1):
window = meth.iloc[:,range(i,i+w)].values
# MeH eligibility
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
ResultPW=ResultPW.append(toappend)
aggreC = aggreC.drop(meth.columns[0:1],axis=1)
aggreC.dropna(axis = 0, thresh=2, inplace = True)
#total += w
# reverse
if neverr and aggreR.shape[1] == (2*w):
neverr = False
aggreR = aggreR.replace(['G'],1)
aggreR = aggreR.replace(['A'],0)
aggreR = aggreR.replace(['C','N','T'],np.nan)
methbin = aggreR # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(0,w,1):
window = meth.iloc[:,range(i,i+w)].values
#if enough_reads(window,w,complete=True):
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
ResultPW=ResultPW.append(toappend)
aggreR = aggreR.drop(meth.columns[0:1],axis=1)
aggreR.dropna(axis = 0, thresh=2, inplace = True)
#------------------
# SECONDARY CASE
#------------------
if (aggreC.shape[1] == (3*w-1)):
aggreC = aggreC.replace(['C'],1)
aggreC = aggreC.replace(['T'],0)
aggreC = aggreC.replace(['N','G','A'],np.nan)
methbin = aggreC # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(w-1,2*w-1,1):
window = meth.iloc[:,range(i,i+w)].values
# MeH eligibility
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
ResultPW=ResultPW.append(toappend)
if ResultPW.shape[0] % 100000 == 1:
ResultPW.to_csv(r"MeHdata/CHH_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CHH_opt_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CHH_ML_%s.csv"%(filename),index = False, header=True)
if not silence:
print("Checkpoint CHH. For file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
aggreC = aggreC.drop(meth.columns[0:w],axis=1)
aggreC.dropna(axis = 0, thresh=2, inplace = True)
#print(aggreC)
#total += w
if (aggreR.shape[1] == (3*w-1)):
aggreR = aggreR.replace(['G'],1)
aggreR = aggreR.replace(['A'],0)
aggreR = aggreR.replace(['N','T','C'],np.nan)
methbin = aggreR # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(w-1,2*w-1,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
ResultPW=ResultPW.append(toappend)
if ResultPW.shape[0] % 100000 == 1:
ResultPW.to_csv(r"MeHdata/CHH_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CHH_ML_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CHH_opt_%s.csv"%(filename),index = False, header=True)
if not silence:
print("Checkpoint CHH. For file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
aggreR = aggreR.drop(meth.columns[0:w],axis=1)
aggreR.dropna(axis = 0, thresh=2, inplace = True)
#print(aggreC)
#total += w
if ResultPW.shape[0]>0:
ResultPW.to_csv(r"MeHdata/CHH_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CHH_ML_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CHH_opt_%s.csv"%(filename),index = False, header=True)
    print("Done CHH for file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
    return sample, coverage, cov_context, 'CHH'
def CHGgenome_scr(bamfile,w,fa,optional,melv,silence=False,dist=1,MeH=2,imp=True):
filename, file_extension = os.path.splitext(bamfile)
sample = str.split(filename,'_')[0]
#directory = "Outputs/" + str(sample) + '.csv' #original filename of .bams
samfile = pysam.AlignmentFile("MeHdata/%s.bam" % (filename), "rb")
fastafile = pysam.FastaFile('MeHdata/%s.fa' % fa)
coverage = cov_context = 0
aggreR = aggreC = pd.DataFrame(columns=['Qname'])
ResultPW = pd.DataFrame(columns=['chrom','pos','MeH','dis','strand'])
if melv:
ResML = pd.DataFrame(columns=['chrom','pos','ML','depth','strand'])
if optional:
if w==3:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08',\
'MeH','dis','strand'])
if w==4:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11',\
'p12','p13','p14','p15','p16','MeH','dis','strand'])
if w==5:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16'\
,'p17','p18','p19','p20','p21','p22','p23','p24','p25','p26','p27','p28',\
'p29','p30','p31','p32','MeH','dis','strand'])
        if w==6:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16'\
,'p17','p18','p19','p20','p21','p22','p23','p24','p25','p26','p27','p28',\
'p29','p30','p31','p32','p33','p34','p35','p36','p37','p38','p39','p40','p41','p42','p43','p44','p45','p46'\
,'p47','p48','p49','p50','p51','p52','p53','p54','p55','p56','p57','p58','p59','p60','p61','p62','p63','p64'\
,'MeH','dis','strand'])
neverr = never = True
#chr_lengths = fastafile.get_reference_length(chrom)
all_pos=np.zeros((2**w,w))
for i in range(w):
all_pos[:,i]=np.linspace(0,2**w-1,2**w)%(2**(i+1))//(2**i)
D=PattoDis(pd.DataFrame(all_pos),dist=dist) #1:Hamming distance
MU=np.zeros((2,w))
start=datetime.datetime.now()
for pileupcolumn in samfile.pileup():
coverage += 1
chrom = pileupcolumn.reference_name
if not silence:
if (pileupcolumn.pos % 2000000 == 1):
print("CHG %s s %s w %s %s pos %s Result %s" % (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),filename,w,chrom,pileupcolumn.pos,ResultPW.shape[0]))
if fastafile.fetch(chrom,pileupcolumn.pos,pileupcolumn.pos+1)=='C' and fastafile.fetch(chrom,pileupcolumn.pos+1,pileupcolumn.pos+2)!='G' and fastafile.fetch(chrom,pileupcolumn.pos+2,pileupcolumn.pos+3)=='G':
cov_context += 1
temp = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and not pileupread.alignment.is_reverse: # C
d = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
df2 = pd.DataFrame(data=d)
#df2.head()
temp=temp.append(df2, ignore_index=True)
#temp.head()
if melv:
temp2 = temp.replace(['C'],1)
temp2 = temp2.replace(['T'],0)
temp2 = temp2.replace(['A','G'],np.nan)
temp2 = temp2.drop('Qname',axis=1)
MC=(temp2==1).sum(axis=0).to_numpy()
UC=(temp2==0).sum(axis=0).to_numpy()
depth=MC+UC
if depth>3:
toappend=pd.DataFrame({'chrom':chrom,'pos':temp2.columns[0], \
'strand':'f','depth':depth,'ML':float(MC)/float(MC+UC)}, index=[0])
ResML=ResML.append(toappend)
if (not temp.empty):
#temp.head()
aggreC = pd.merge(aggreC,temp,how='outer',on=['Qname'])
aggreC = aggreC.drop_duplicates()
# reverse
if pileupcolumn.pos>2:
if fastafile.fetch(chrom,pileupcolumn.pos,pileupcolumn.pos+1)=='G' and fastafile.fetch(chrom,pileupcolumn.pos-1,pileupcolumn.pos)!='C' and fastafile.fetch(chrom,pileupcolumn.pos-2,pileupcolumn.pos-1)=='C':
cov_context += 1
tempr = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and pileupread.alignment.is_reverse: # G
dr = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
df2r = pd.DataFrame(data=dr)
#df2.head()
tempr=tempr.append(df2r, ignore_index=True)
#temp.head()
if melv:
temp2 = tempr.replace(['G'],1)
temp2 = temp2.replace(['A'],0)
temp2 = temp2.replace(['C','T'],np.nan)
temp2 = temp2.drop('Qname',axis=1)
MC=(temp2==1).sum(axis=0).to_numpy()
UC=(temp2==0).sum(axis=0).to_numpy()
depth=MC+UC
if depth>3:
toappend=pd.DataFrame({'chrom':chrom,'pos':temp2.columns[0], \
'strand':'r','depth':depth,'ML':float(MC)/float(MC+UC)}, index=[0])
ResML=ResML.append(toappend)
if (not tempr.empty):
#temp.head()
aggreR = pd.merge(aggreR,tempr,how='outer',on=['Qname'])
aggreR = aggreR.drop_duplicates()
if never and aggreC.shape[1] == (2*w):
never = False
aggreC = aggreC.replace(['C'],1)
aggreC = aggreC.replace(['T'],0)
aggreC = aggreC.replace(['A','G','N'],np.nan)
methbin = aggreC # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(0,w,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,optional=optional,strand='f')
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,optional=optional,strand='f')
ResultPW=ResultPW.append(toappend)
aggreC = aggreC.drop(meth.columns[0:1],axis=1)
aggreC.dropna(axis = 0, thresh=2, inplace = True)
#total += w
# reverse
if neverr and aggreR.shape[1] == (2*w):
neverr = False
aggreR = aggreR.replace(['G'],1)
aggreR = aggreR.replace(['A'],0)
aggreR = aggreR.replace(['N','C','T'],np.nan)
methbin = aggreR # backup
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(0,w,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,optional=optional,strand='r')
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,optional=optional,strand='r')
ResultPW=ResultPW.append(toappend)
aggreR = aggreR.drop(meth.columns[0:1],axis=1)
aggreR.dropna(axis = 0, thresh=2, inplace = True)
#total += w
#------------------
# SECONDARY CASE
#------------------
if (aggreC.shape[1] == (3*w-1)):
aggreC = aggreC.replace(['C'],1)
aggreC = aggreC.replace(['T'],0)
aggreC = aggreC.replace(['N','A','G'],np.nan)
methbin = aggreC # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(w-1,2*w-1,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,optional=optional,strand='f')
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,optional=optional,strand='f')
ResultPW=ResultPW.append(toappend)
if ResultPW.shape[0] % 100000 == 1:
ResultPW.to_csv(r"MeHdata/CHG_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CHG_opt_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CHG_ML_%s.csv"%(filename),index = False, header=True)
if not silence:
print("Checkpoint CHG. For file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
aggreC = aggreC.drop(meth.columns[0:w],axis=1)
aggreC.dropna(axis = 0, thresh=2, inplace = True)
#print(aggreC)
#total += w
# reverse
if (aggreR.shape[1] == (3*w-1)):
aggreR = aggreR.replace(['G'],1)
aggreR = aggreR.replace(['A'],0)
aggreR = aggreR.replace(['N','T','C'],np.nan)
methbin = aggreR # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(w-1,2*w-1,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,optional=optional,strand='r')
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,optional=optional,strand='r')
ResultPW=ResultPW.append(toappend)
if ResultPW.shape[0] % 100000 == 1:
ResultPW.to_csv(r"MeHdata/CHG_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CHG_opt_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CHG_ML_%s.csv"%(filename),index = False, header=True)
if not silence:
print("Checkpoint CHG. For file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos+1))
aggreR = aggreR.drop(meth.columns[0:w],axis=1)
aggreR.dropna(axis = 0, thresh=2, inplace = True)
if ResultPW.shape[0]>0:
ResultPW.to_csv(r"MeHdata/CHG_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CHG_ML_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CHG_opt_%s.csv"%(filename),index = False, header=True)
    print("Done CHG for file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos+1))
    return sample, coverage, cov_context, 'CHG'
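# Usage sketch (hypothetical file names; assumes MeHdata/sample1_0.bam and MeHdata/genome.fa exist):
# sample, cov, cov_chg, ctx = CHGgenome_scr('sample1_0.bam', w=4, fa='genome', optional=False, melv=True)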
def split_bam(samplenames,Folder):
# get bam size
spbam_list = []
bamfile = samplenames + '.bam'
statinfo_out = os.stat(Folder+bamfile)
bamsize = statinfo_out.st_size
samfile = pysam.Samfile(Folder+bamfile, "rb")
fileout_base = os.path.splitext(bamfile)[0] # filename
ext = '.bam'
x = 0
fileout = Folder+fileout_base+"_" + str(x)+ext # filename_x.bam
print("fileout",fileout)
header = samfile.header
outfile = pysam.Samfile(fileout, "wb", header = header)
sum_Outfile_Size=0
for reads in samfile.fetch():
outfile.write(reads)
statinfo_out = os.stat(fileout)
outfile_Size = statinfo_out.st_size
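        # start a new output chunk once the current file exceeds ~337 MB and the input bam is not yet fully covered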
if(outfile_Size >=337374182 and sum_Outfile_Size <= bamsize):
sum_Outfile_Size = sum_Outfile_Size + outfile_Size
x = x + 1
spbam_list.append(fileout_base + "_" + str(x)+ext)
outfile.close()
pysam.index(fileout)
fileout = Folder+fileout_base + "_" + str(x)+ext
print("fileout",fileout)
outfile = pysam.Samfile(fileout, "wb",header = header)
outfile.close()
pysam.index(fileout)
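# Usage sketch (hypothetical sample name): split MeHdata/sample1.bam into indexed chunks of roughly 337 MB,
# named MeHdata/sample1_0.bam, MeHdata/sample1_1.bam, ..., for parallel screening:
# split_bam('sample1', Folder='MeHdata/')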
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-w", "--windowsize",type=int, default=4 ,help='number of CGs')
parser.add_argument("-c", "--cores",type=int, default=4, help='number of cores')
parser.add_argument("-m", "--MeH",type=int, default=2, help='Methylation heterogeneity score 1:Abundance 2:PW 3:Phylogeny')
parser.add_argument("-d", "--dist",type=int, default=1, help='Distance between methylation patterns 1:Hamming 2:WDK')
parser.add_argument("--CG", default=False, action='store_true', help='Include genomic context CG')
parser.add_argument("--CHG", default=False, action='store_true', help='Include genomic context CHG')
parser.add_argument("--CHH", default=False, action='store_true', help='Include genomic context CHH')
parser.add_argument("--opt", default=False, action='store_true', help='Outputs compositions of methylation patterns')
parser.add_argument('--mlv', default=False, action='store_true', help='Outputs methylation levels')
parser.add_argument('--imp', default=True, action='store_false', help='Disable BSImp imputation (imputation is applied by default)')
args = parser.parse_args()
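# Example invocation (the script name is an assumption; expects *.bam files and one *.fa reference in MeHdata/):
# python MeHscr.py -w 4 -c 8 --CG --CHG --mlv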
import sys
import time
import os
import pandas as pd
import multiprocessing
from joblib import Parallel, delayed
#num_cores = multiprocessing.cpu_count()
if __name__ == "__main__":
open_log('MeHscreening.log')
logm("Call genome screening.")
#start = time.time()
Folder = 'MeHdata/'
files = os.listdir(Folder)
bam_list = []
# all samples' bam files
for file in files:
filename, file_extension = os.path.splitext(file)
if file_extension == '.fa':
fa = filename
if file_extension == '.bam':
bam_list.append(filename)
#if 'cores' in args:
# num_cores = args.cores
#else:
# num_cores = 4
Parallel(n_jobs=args.cores)(delayed(split_bam)(bamfile,Folder=Folder) for bamfile in bam_list)
spbam_list = []
tempfiles = os.listdir(Folder)
for file in tempfiles:
filename, file_extension = os.path.splitext(file)
if file_extension=='.bam' and filename not in bam_list:
spbam_list.append(filename)
#print(spbam_list)
topp = pd.DataFrame(columns=['sample','coverage','context_coverage','context'])
#CG = []
#start=t.time()
if args.CG:
con='CG'
CG=Parallel(n_jobs=args.cores)(delayed(CGgenome_scr)(bamfile,w=args.windowsize,fa=fa,MeH=args.MeH,dist=args.dist,optional=args.opt,melv=args.mlv,imp=args.imp) for bamfile in spbam_list)
logm("Merging MeH within samples for CG.")
# merge MeH within sample
for file in spbam_list:
filename, file_extension = os.path.splitext(file)
sample = str.split(file,'_')[0]
print("Merging within sample",sample,"...")
if not sample == filename:
res_dir = Folder + con + '_' + str(sample) + '.csv'
toapp_dir = Folder + con + '_' + file + '.csv'
if os.path.exists(res_dir):
Tomod = pd.read_csv(res_dir)
Toappend = pd.read_csv(toapp_dir)
Toappend['bin'] = [((x-1)//400)*400+200 for x in Toappend['pos']]
Toappend=Toappend.drop(columns=['pos'])
#Toappend=Toappend.dropna(axis = 0, thresh=4, inplace = True)
Toappend=Toappend.groupby(['chrom','bin','strand']).agg({'MeH': 'mean'}).reset_index()
Tomod = Tomod.append(Toappend)
Tomod.to_csv(res_dir,index = False,header=True)
#os.remove(toapp_dir)
else:
Toappend = pd.read_csv(toapp_dir)
Toappend['bin'] = [((x-1)//400)*400+200 for x in Toappend['pos']]
Toappend=Toappend.drop(columns=['pos'])
#Toappend=Toappend.dropna(axis = 0, thresh=4, inplace = True)
Toappend=Toappend.groupby(['chrom','bin','strand']).agg({'MeH': 'mean'}).reset_index()
Toappend.to_csv(res_dir,index = False,header=True)
# not into bins of 400bp
if args.opt:
for file in spbam_list:
filename, file_extension = os.path.splitext(file)
sample = str.split(file,'_')[0]
#print("sample = ",sample)
if not sample == filename:
res_dir = Folder + con + '_opt_' + str(sample) + '.csv'
toapp_dir = Folder + con + '_opt_' +file + '.csv'
if os.path.exists(res_dir):
Tomod = pd.read_csv(res_dir)
Toappend = pd.read_csv(toapp_dir)
Tomod = Tomod.append(Toappend)
Tomod.to_csv(res_dir,index = False, header = True)
else:
Toappend = pd.read_csv(toapp_dir)
Toappend.to_csv(res_dir,index = False,header=True)
#os.chdir('../')
#os.chdir(outputFolder)
logm("Merging ML within samples for CG.")
# append ML within samples
if args.mlv:
for file in spbam_list:
filename, file_extension = os.path.splitext(file)
sample = str.split(file,'_')[0]
res_dir = Folder + con + '_ML_' + str(sample) + '.csv'
toapp_dir = Folder + con + '_ML_' + file + '.csv'
if os.path.exists(res_dir):
Tomod = pd.read_csv(res_dir)
Toappend = pd.read_csv(toapp_dir)
Toappend['bin'] = [((x-1)//400)*400+200 for x in Toappend['pos']]
Count = Toappend.groupby(['chrom','bin','strand']).size().reset_index(name='counts')
Toappend=Toappend.merge(Count, on=['chrom','bin','strand'])
conditions = [
(Toappend['counts'] > 4),
(Toappend['counts'] < 5)
]
# create a list of the values we want to assign for each condition
values = [Toappend['ML'], np.nan]
# create a new column and use np.select to assign values to it using our lists as arguments
Toappend['ML'] = np.select(conditions, values)
Toappend=Toappend.drop(columns=['counts','pos'])
Toappend=Toappend.groupby(['chrom','bin','strand']).agg({'ML': 'mean'}).reset_index()
Tomod = Tomod.append(Toappend)
Tomod.to_csv(res_dir,index = False,header=True)
#os.remove(toapp_dir)
else:
Toappend = pd.read_csv(toapp_dir)
Toappend['bin'] = [((x-1)//400)*400+200 for x in Toappend['pos']]
Count = Toappend.groupby(['chrom','bin','strand']).size().reset_index(name='counts')
Toappend=Toappend.merge(Count, on=['chrom','bin','strand'])
#print(Toappend)
conditions = [
(Toappend['counts'] > 4),
(Toappend['counts'] < 5)
]
# create a list of the values we want to assign for each condition
values = [Toappend['ML'], np.nan]
# create a new column and use np.select to assign values to it using our lists as arguments
Toappend['ML'] = np.select(conditions, values)
Toappend=Toappend.drop(columns=['counts','pos'])
Toappend=Toappend.groupby(['chrom','bin','strand']).agg({'ML': 'mean'}).reset_index()
Toappend.to_csv(res_dir,index = False,header=True)
logm("Merging ML between samples for CG.")
# merge ML between samples
if args.mlv:
for sample in bam_list:
tomerge_dir = Folder + con + '_ML_' + str(sample) + '.csv'
res_dir = Folder + con + '_ML_' + 'Results.csv'
if os.path.exists(res_dir):
Result = pd.read_csv(res_dir)
Tomerge = pd.read_csv(tomerge_dir)
Tomerge.dropna(axis = 0, thresh=4, inplace = True)
Tomerge = Tomerge.rename(columns={'ML': sample})
Result=Result.merge(Tomerge, on=['chrom','bin','strand'])
Result.dropna(axis = 0, thresh=4, inplace = True)
Result.to_csv(res_dir,index = False,header=True)
os.remove(tomerge_dir)
else:
Result = pd.read_csv(tomerge_dir)
Result = Result.rename(columns={'ML': sample})
#Result = Result.drop(columns=['counts','pos','depth','dis'])
Result.dropna(axis = 0, thresh=4, inplace = True)
Result.to_csv(res_dir,index = False,header=True)
os.remove(tomerge_dir)
logm("Merging MeH between samples for CG.")
# merge MeH between samples
for sample in bam_list:
tomerge_dir = Folder + con + '_' + str(sample) + '.csv'
res_dir = Folder + con + '_' + 'Results.csv'
if os.path.exists(res_dir):
Result = pd.read_csv(res_dir)
Tomerge = pd.read_csv(tomerge_dir)
Tomerge.dropna(axis = 0, thresh=4, inplace = True)
Tomerge = Tomerge.rename(columns={'MeH': sample})
Result = Result.merge(Tomerge, on=['chrom','bin','strand'])
Result.dropna(axis = 0, thresh=4, inplace = True)
Result.to_csv(Folder + con + '_' +'Results.csv',index = False,header=True)
os.remove(tomerge_dir)
else:
Result = pd.read_csv(tomerge_dir)
Result.head()
Result.dropna(axis = 0, thresh=4, inplace = True)
Result = Result.rename(columns={'MeH': sample})
Result.to_csv(Folder + con + '_' +'Results.csv',index = False,header=True)
os.remove(tomerge_dir)
Result.to_csv(Folder + con + '_' +'Results.csv' ,index = False,header=True)
print("All done.",len(bam_list),"bam files processed and merged for CG.")
logm("All done. "+str(len(bam_list))+" bam files processed and merged for CG.")
for i in CG:
toout=pd.DataFrame({'sample':i[0],'coverage':i[1],'context_coverage':i[2],'context':i[3]},index=[0])
topp=topp.append(toout)
if args.CHG:
con='CHG'
CG=Parallel(n_jobs=args.cores)(delayed(CHGgenome_scr)(bamfile,w=args.windowsize,fa=fa,MeH=args.MeH,dist=args.dist,optional=args.opt,melv=args.mlv,imp=args.imp) for bamfile in spbam_list)
logm("Merging MeH within samples for CHG.")
for file in spbam_list:
filename, file_extension = os.path.splitext(file)
sample = str.split(file,'_')[0]
print("Merging within sample",sample,"...")
if not sample == filename:
res_dir = Folder + con + '_' + str(sample) + '.csv'
toapp_dir = Folder + con + '_' + file + '.csv'
if os.path.exists(res_dir):
Tomod = pd.read_csv(res_dir)
Toappend = pd.read_csv(toapp_dir)
Toappend['bin'] = [((x-1)//400)*400+200 for x in Toappend['pos']]
Toappend=Toappend.drop(columns=['pos'])
Toappend=Toappend.groupby(['chrom','bin','strand']).agg({'MeH': 'mean'}).reset_index()
Tomod = Tomod.append(Toappend)
Tomod.to_csv(res_dir,index = False,header=True)
else:
Toappend = pd.read_csv(toapp_dir)
Toappend['bin'] = [((x-1)//400)*400+200 for x in Toappend['pos']]
Toappend=Toappend.drop(columns=['pos'])
Toappend=Toappend.groupby(['chrom','bin','strand']).agg({'MeH': 'mean'}).reset_index()
Toappend.to_csv(res_dir,index = False,header=True)
# not into bins of 400bp
if args.opt:
for file in spbam_list:
filename, file_extension = os.path.splitext(file)
sample = str.split(file,'_')[0]
#print("sample = ",sample)
if not sample == filename:
res_dir = Folder + con + '_opt_' + str(sample) + '.csv'
toapp_dir = Folder + con + '_opt_' +file + '.csv'
if os.path.exists(res_dir):
Tomod = pd.read_csv(res_dir)
Toappend = pd.read_csv(toapp_dir)
Tomod = Tomod.append(Toappend)
Tomod.to_csv(res_dir,index = False,header=True)
os.remove(toapp_dir)
else:
Toappend = pd.read_csv(toapp_dir)
Toappend.to_csv(res_dir,index = False,header=True)
os.remove(toapp_dir)
logm("Merging ML within samples for CHG.")
# append ML within samples
if args.mlv:
for file in spbam_list:
filename, file_extension = os.path.splitext(file)
sample = str.split(file,'_')[0]
res_dir = Folder + con + '_ML_' + str(sample) + '.csv'
toapp_dir = Folder + con + '_ML_' + file + '.csv'
if os.path.exists(res_dir):
Tomod = pd.read_csv(res_dir)
Toappend = pd.read_csv(toapp_dir)
Toappend['bin'] = [((x-1)//400)*400+200 for x in Toappend['pos']]
Count = Toappend.groupby(['chrom','bin','strand']).size().reset_index(name='counts')
#Count=Count.drop_duplicates()
#print(Count)
Toappend=Toappend.merge(Count, on=['chrom','bin','strand'])
conditions = [
(Toappend['counts'] > 4),
(Toappend['counts'] < 5)
]
# create a list of the values we want to assign for each condition
values = [Toappend['ML'], np.nan]
# create a new column and use np.select to assign values to it using our lists as arguments
Toappend['ML'] = np.select(conditions, values)
Toappend=Toappend.drop(columns=['counts','pos'])
Toappend=Toappend.groupby(['chrom','bin','strand']).agg({'ML': 'mean'}).reset_index()
Tomod = Tomod.append(Toappend)
Tomod.to_csv(res_dir,index = False,header=True)
os.remove(toapp_dir)
else:
Toappend = pd.read_csv(toapp_dir)
Toappend['bin'] = [((x-1)//400)*400+200 for x in Toappend['pos']]
Count = Toappend.groupby(['chrom','bin','strand']).size().reset_index(name='counts')
Toappend=Toappend.merge(Count, on=['chrom','bin','strand'])
#print(Toappend)
conditions = [
(Toappend['counts'] > 4),
(Toappend['counts'] < 5)
]
# create a list of the values we want to assign for each condition
values = [Toappend['ML'], np.nan]
# create a new column and use np.select to assign values to it using our lists as arguments
Toappend['ML'] = np.select(conditions, values)
Toappend=Toappend.drop(columns=['counts','pos'])
Toappend=Toappend.groupby(['chrom','bin','strand']).agg({'ML': 'mean'}).reset_index()
Toappend.to_csv(res_dir,index = False,header=True)
os.remove(toapp_dir)
logm("Merging MeH between samples for CHG.")
# merge MeH between samples
for sample in bam_list:
tomerge_dir = Folder + con + '_' + str(sample) + '.csv'
res_dir = Folder + con + '_' + 'Results.csv'
if os.path.exists(res_dir):
Result = pd.read_csv(res_dir)
Tomerge = pd.read_csv(tomerge_dir)
#Tomerge = Tomerge.drop(columns=['dis','ML','depth'])
Tomerge.dropna(axis = 0, thresh=4, inplace = True)
Tomerge = Tomerge.rename(columns={'MeH': sample})
Result = Result.merge(Tomerge, on=['chrom','bin','strand'])
Result.dropna(axis = 0, thresh=4, inplace = True)
Result.to_csv(Folder + con + '_' +'Results.csv',index = False,header=True)
os.remove(tomerge_dir)
else:
Result = | pd.read_csv(tomerge_dir) | pandas.read_csv |
from distutils.version import LooseVersion
from warnings import catch_warnings
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
import pandas as pd
from pandas import (
DataFrame,
HDFStore,
Index,
MultiIndex,
Series,
_testing as tm,
bdate_range,
concat,
date_range,
isna,
read_hdf,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
tables,
)
from pandas.io.pytables import Term
pytestmark = pytest.mark.single
def test_select_columns_in_where(setup_path):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo_name", "bar_name"],
)
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
expected = df[["A"]]
tm.assert_frame_equal(store.select("df", columns=["A"]), expected)
tm.assert_frame_equal(store.select("df", where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index, name="A")
with ensure_clean_store(setup_path) as store:
store.put("s", s, format="table")
tm.assert_series_equal(store.select("s", where="columns=['A']"), s)
def test_select_with_dups(setup_path):
# single dtypes
df = DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"])
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=["A"])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# dups across dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["A"]]
result = store.select("df", columns=["A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["B", "A"]]
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
# duplicates on both index and columns
with ensure_clean_store(setup_path) as store:
store.append("df", df)
store.append("df", df)
expected = df.loc[:, ["B", "A"]]
expected = concat([expected, expected])
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
def test_select(setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
# select with columns=
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df")
store.append("df", df)
result = store.select("df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# equivalently
result = store.select("df", [("columns=['A', 'B']")])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# all a data columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column, but different columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["C", "D"])
expected = df[df.A > 0].reindex(columns=["C", "D"])
tm.assert_frame_equal(expected, result)
def test_select_dtypes(setup_path):
with ensure_clean_store(setup_path) as store:
# with a Timestamp data column (GH #2637)
df = DataFrame(
{
"ts": bdate_range("2012-01-01", periods=300),
"A": np.random.randn(300),
}
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A"])
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# bool columns (GH #2849)
df = DataFrame(np.random.randn(5, 2), columns=["A", "B"])
df["object"] = "foo"
df.loc[4:5, "object"] = "bar"
df["boolv"] = df["A"] > 0
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
expected = df[df.boolv == True].reindex(columns=["A", "boolv"]) # noqa
for v in [True, "true", 1]:
result = store.select("df", f"boolv == {v}", columns=["A", "boolv"])
tm.assert_frame_equal(expected, result)
expected = df[df.boolv == False].reindex(columns=["A", "boolv"]) # noqa
for v in [False, "false", 0]:
result = store.select("df", f"boolv == {v}", columns=["A", "boolv"])
tm.assert_frame_equal(expected, result)
# integer index
df = DataFrame({"A": np.random.rand(20), "B": np.random.rand(20)})
_maybe_remove(store, "df_int")
store.append("df_int", df)
result = store.select("df_int", "index<10 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
# float index
df = DataFrame(
{
"A": np.random.rand(20),
"B": np.random.rand(20),
"index": np.arange(20, dtype="f8"),
}
)
_maybe_remove(store, "df_float")
store.append("df_float", df)
result = store.select("df_float", "index<10.0 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
with ensure_clean_store(setup_path) as store:
# floats w/o NaN
df = DataFrame({"cols": range(11), "values": range(11)}, dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
store.append("df1", df, data_columns=True)
result = store.select("df1", where="values>2.0")
expected = df[df["values"] > 2.0]
tm.assert_frame_equal(expected, result)
# floats with NaN
df.iloc[0] = np.nan
expected = df[df["values"] > 2.0]
store.append("df2", df, data_columns=True, index=False)
result = store.select("df2", where="values>2.0")
tm.assert_frame_equal(expected, result)
# https://github.com/PyTables/PyTables/issues/282
# bug in selection when 0th row has a np.nan and an index
# store.append('df3',df,data_columns=True)
# result = store.select(
# 'df3', where='values>2.0')
# tm.assert_frame_equal(expected, result)
# not in first position float with NaN ok too
df = DataFrame({"cols": range(11), "values": range(11)}, dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
df.iloc[1] = np.nan
expected = df[df["values"] > 2.0]
store.append("df4", df, data_columns=True)
result = store.select("df4", where="values>2.0")
tm.assert_frame_equal(expected, result)
# test selection with comparison against numpy scalar
# GH 11283
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
expected = df[df["A"] > 0]
store.append("df", df, data_columns=True)
np_zero = np.float64(0) # noqa
result = store.select("df", where=["A>np_zero"])
tm.assert_frame_equal(expected, result)
def test_select_with_many_inputs(setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{
"ts": bdate_range("2012-01-01", periods=300),
"A": np.random.randn(300),
"B": range(300),
"users": ["a"] * 50
+ ["b"] * 50
+ ["c"] * 100
+ [f"a{i:03d}" for i in range(100)],
}
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A", "B", "users"])
# regular select
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# small selector
result = store.select("df", "ts>=Timestamp('2012-02-01') & users=['a','b','c']")
expected = df[
(df.ts >= Timestamp("2012-02-01")) & df.users.isin(["a", "b", "c"])
]
tm.assert_frame_equal(expected, result)
# big selector along the columns
selector = ["a", "b", "c"] + [f"a{i:03d}" for i in range(60)]
result = store.select("df", "ts>=Timestamp('2012-02-01') and users=selector")
expected = df[(df.ts >= Timestamp("2012-02-01")) & df.users.isin(selector)]
tm.assert_frame_equal(expected, result)
selector = range(100, 200)
result = store.select("df", "B=selector")
expected = df[df.B.isin(selector)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
# big selector along the index
selector = Index(df.ts[0:100].values)
result = store.select("df", "ts=selector")
expected = df[df.ts.isin(selector.values)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
def test_select_iterator(setup_path):
# single table
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame(500)
_maybe_remove(store, "df")
store.append("df", df)
expected = store.select("df")
results = list(store.select("df", iterator=True))
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=100))
assert len(results) == 5
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=150))
result = concat(results)
tm.assert_frame_equal(result, expected)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df_non_table")
msg = "can only use an iterator or chunksize on a table"
with pytest.raises(TypeError, match=msg):
read_hdf(path, "df_non_table", chunksize=100)
with pytest.raises(TypeError, match=msg):
read_hdf(path, "df_non_table", iterator=True)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df", format="table")
results = list(read_hdf(path, "df", chunksize=100))
result = concat(results)
assert len(results) == 5
tm.assert_frame_equal(result, df)
tm.assert_frame_equal(result, read_hdf(path, "df"))
# multiple
with ensure_clean_store(setup_path) as store:
df1 = tm.makeTimeDataFrame(500)
store.append("df1", df1, data_columns=True)
df2 = tm.makeTimeDataFrame(500).rename(columns="{}_2".format)
df2["foo"] = "bar"
store.append("df2", df2)
df = concat([df1, df2], axis=1)
# full selection
expected = store.select_as_multiple(["df1", "df2"], selector="df1")
results = list(
store.select_as_multiple(["df1", "df2"], selector="df1", chunksize=150)
)
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_complete_8014(setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# no iterator
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/o iteration and no where clause works
result = store.select("df")
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, begin
# of range, works
where = f"index >= '{beg_dt}'"
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, end
# of range, works
where = f"index <= '{end_dt}'"
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, inclusive range,
# works
where = f"index >= '{beg_dt}' & index <= '{end_dt}'"
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# with iterator, full range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/iterator and no where clause works
results = list(store.select("df", chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, begin of range
where = f"index >= '{beg_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
| tm.assert_frame_equal(expected, result) | pandas._testing.assert_frame_equal |
# -*- coding: utf-8 -*-
# This file as well as the whole tsfresh package are licenced under the MIT licence (see the LICENCE.txt)
# <NAME> (<EMAIL>), Blue Yonder Gmbh, 2016
import pandas as pd
import numpy as np
from tests.fixtures import DataTestCase
import mock
from tsfresh.transformers.relevant_feature_augmenter import RelevantFeatureAugmenter
class RelevantFeatureAugmenterTestCase(DataTestCase):
def setUp(self):
self.test_df = self.create_test_data_sample()
fc_parameters = {"length": None}
self.kind_to_fc_parameters = {"a": fc_parameters.copy(),
"b": fc_parameters.copy()}
def test_not_fitted(self):
augmenter = RelevantFeatureAugmenter()
X = pd.DataFrame()
self.assertRaises(RuntimeError, augmenter.transform, X)
def test_no_timeseries(self):
augmenter = RelevantFeatureAugmenter()
X = pd.DataFrame()
y = pd.Series()
self.assertRaises(RuntimeError, augmenter.fit, X, y)
def test_nothing_relevant(self):
augmenter = RelevantFeatureAugmenter(kind_to_fc_parameters=self.kind_to_fc_parameters,
column_value="val", column_id="id", column_sort="sort",
column_kind="kind")
y = pd.Series({10: 1, 500: 0})
X = | pd.DataFrame(index=[10, 500]) | pandas.DataFrame |
import pandas as pd
import numpy as np
import datetime
import pickle
from mlxtend.preprocessing import TransactionEncoder
from mlxtend.frequent_patterns import apriori
from mlxtend.frequent_patterns import association_rules
with open("top_n-20m.pickle", "rb") as fp:
top_n = pickle.load(fp)
top_n_items = [ [x[0] for x in row] for row in top_n]
te = TransactionEncoder()
te_ary = te.fit(top_n_items).transform(top_n_items, sparse=True)
topn_df = | pd.DataFrame.sparse.from_spmatrix(te_ary, columns=te.columns_) | pandas.DataFrame.sparse.from_spmatrix |
import numpy as np
import os
import pandas as pd
######## feature template ########
def get_bs_cat(df_policy, idx_df, col):
'''
In:
DataFrame(df_policy),
Any(idx_df),
str(col),
Out:
Series(cat_),
Description:
get category directly from df_policy
'''
df = df_policy.groupby(level=0).agg({col: lambda x: x.iloc[0]})
return(df.loc[idx_df, col].fillna(0))
def get_bs_real_freq(X_all, idx_df, col):
'''
In:
DataFrame(X_all),
Any(idx_df)
str(col),
Out:
Series(real_freq_),
Description:
get number of occurance of each value of categorical features
'''
# frequency of category
df_map = X_all.groupby([col]).agg({'real_prem_plc': lambda x: len(x)})
# map premium by category to policy
real_freq_col = X_all[col].map(df_map['real_prem_plc'])
return(real_freq_col.loc[idx_df])
def get_bs_cat_inter(df_policy, idx_df, col1, col2):
'''
In:
DataFrame(df_policy),
Any(idx_df)
str(col),
Out:
Series(cat_col1_col2),
Description:
get interaction of two categorical features
'''
# all col combination of col1 and col2
df_policy = df_policy.groupby(level=0).agg({col1: lambda x: str(x.iloc[0]), col2: lambda x: str(x.iloc[0])})
# concat col1 and col2
cat_col1_col2 = df_policy[col1] + df_policy[col2]
return(cat_col1_col2.loc[idx_df])
def get_bs_real_mc_mean(col_cat, X_train, y_train, X_valid=pd.DataFrame(), train_only=True, fold=5, prior=1000):
'''
In:
str(col_cat)
DataFrame(X_train),
DataFrame(y_train),
DataFrame(X_valid),
bool(train_only),
double(fold),
Out:
Series(real_mc_prob_distr),
Description:
get mean of next_premium by col_cat
'''
if train_only:
np.random.seed(1)
rand = np.random.rand(len(X_train))
lvs = [i / float(fold) for i in range(fold+1)]
X_arr = []
for i in range(fold):
msk = (rand >= lvs[i]) & (rand < lvs[i+1])
X_slice = X_train[msk]
X_base = X_train[~msk]
y_base = y_train[~msk]
X_slice = get_bs_real_mc_mean(col_cat, X_base, y_base, X_valid=X_slice, train_only=False, prior=prior)
X_arr.append(X_slice)
real_mc_mean = pd.concat(X_arr).loc[X_train.index]
else:
# merge col_cat with label
y_train = y_train.merge(X_train[[col_cat]], how='left', left_index=True, right_index=True)
y_train = y_train.assign(real_mc_mean = y_train['Next_Premium'])
# get mean of each category and smoothed by global mean
smooth_mean = lambda x: (x.sum() + prior * y_train['real_mc_mean'].mean()) / (len(x) + prior)
y_train = y_train.groupby([col_cat]).agg({'real_mc_mean': smooth_mean})
real_mc_mean = X_valid[col_cat].map(y_train['real_mc_mean'])
        # fill na with global mean (the fill value here is assumed to be the mean of the encoded values)
        real_mc_mean = real_mc_mean.where(~pd.isnull(real_mc_mean), real_mc_mean.mean())
    return(real_mc_mean)
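# Usage sketch (hypothetical column name 'cat1'): out-of-fold mean target encoding of a categorical feature,
# smoothed toward the global mean of 'Next_Premium' with a prior of 1000 pseudo-observations:
# X_train['real_mc_mean_cat1'] = get_bs_real_mc_mean('cat1', X_train, y_train, train_only=True, fold=5, prior=1000)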
"""Plotting Utils."""
import altair as alt
import numpy as np
import pandas as pd
def similarity_heatmaps(sim_of_sim, labels_dict, axis_title='', width=300, columns=2, min_step=1):
plot_data = pd.DataFrame()
for key in sim_of_sim:
        # Build the (x, y) grid of label pairs for this similarity matrix
labels = labels_dict[key] or range(len(sim_of_sim[key]))
x, y = np.meshgrid(labels, labels)
z = sim_of_sim[key]
# Convert this grid to columnar data expected by Altair
row = pd.DataFrame({'x': x.ravel(),
'y': y.ravel(),
'z': sim_of_sim[key].ravel(),
'key': key})
        plot_data = plot_data.append(row, ignore_index=True)
base = alt.Chart(plot_data, width=width, height=width).mark_rect().encode(
x=alt.X('x:N', sort=labels, title=axis_title,
axis=alt.Axis(values=np.asarray(labels)[list(range(0, len(labels), min_step))])),
        y=alt.Y('y:N', sort=labels, title=axis_title,
axis=alt.Axis(values=np.asarray(labels)[list(range(0, len(labels), min_step))])),
color=alt.Color('z:Q', title='Similarity'),
)
# Configure text
text = base.mark_text(baseline='middle').encode(
text='z:Q',
color=alt.condition(
alt.datum.z > 0.5,
alt.value('black'),
alt.value('white')
)
)
plot = base + text
    return plot.facet(
facet=alt.Facet('key:N', title='', header=alt.Header(labelFontSize=16)),
columns=columns
).resolve_scale(
color='independent',
x='independent',
y='independent',
).configure_axis(
labelFontSize=14,
titleFontSize=16
).configure_legend(
labelFontSize=14,
titleFontSize=14
).configure_title(
fontSize=18)
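# Usage sketch (hypothetical inputs): `sims` maps names to square similarity matrices and `labels` maps the
# same keys to tick labels (or None to fall back to integer indices):
# chart = similarity_heatmaps(sims, labels, axis_title='Layer', width=250, columns=3)
# chart.save('similarity_heatmaps.html')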
def layer_similarity(sim_of_sim, labels_dict, axis_title='', width=300, columns=2):
plot_data = | pd.DataFrame() | pandas.DataFrame |
import re
import numpy as np
import pandas as pd
import itertools
from collections import OrderedDict
from tqdm.auto import tqdm
import datetime
from sklearn.model_selection import KFold, StratifiedKFold
from sklearn.feature_extraction.text import CountVectorizer
from logging import getLogger
logger = getLogger("splt")
def load(DO_TEST = False):
"""raw csvとweaponなど外部データを読み込んでjoin"""
train = pd.read_csv("../data/train_data.csv")
test = pd.read_csv('../data/test_data.csv')
if DO_TEST:
        logger.info("[TEST_MODE] Truncating train/test data.")
train = train.iloc[:1000,:]
test = test.iloc[:300,:]
train["split_type"] = "train"
test["split_type"] = "test"
merge = pd.concat([train, test]).reset_index(drop=True).drop(["game-ver", "lobby"], axis=1)
merge["period"] = pd.to_datetime(merge["period"])
    # join the external weapon data
weapon = pd.read_csv("../data/weapon_merge.csv")
weapon = weapon[["key", "category1", "category2", "mainweapon", "subweapon", "special", "reskin", "main_power_up"]]
weapon.columns = ["key", "cat1", "cat2", "main", "sub", "special", "reskin", "powerup"]
    # confirm that no '-' delimiter characters are present
assert weapon.applymap(lambda x:"-" in x).any().any() == False
assert merge[["mode", "stage"]].applymap(lambda x:"-" in x).any().any() == False
m2 = merge.copy()
for team, num in itertools.product(["A", "B"], [1, 2, 3, 4]):
m2 = pd.merge(left=m2, right=weapon, left_on=f"{team}{num}-weapon", right_on="key", how="left")
assert m2.shape[0] == merge.shape[0]
m2 = m2.drop("key", axis=1).rename(columns={x:f"{team}{num}-weapon_{x}" for x in weapon.columns if x!= "key"})
col_weapon_names = ["weapon"] + ["weapon_" + x for x in weapon.columns[1:]]
    # join the external stage data
stage = pd.read_csv("../data/stage.csv")
area = pd.merge(left=merge["stage"], right=stage, left_on="stage", right_on="key", how="left")["area"]
m2["stage_area"] = area
    # features related to estimating the A1 user
m = pd.read_csv("../data/merge_A1-level_bin.csv")
m2["A1-level_bin"] = m["A1-level_bin"]
m = | pd.read_csv("../data/merge_A1-uid.csv") | pandas.read_csv |
from IMLearn.utils import split_train_test
from IMLearn.learners.regressors import LinearRegression
from typing import NoReturn
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
import plotly.io as pio
pio.templates.default = "simple_white"
def load_data(filename: str):
"""
Load house prices dataset and preprocess data.
Parameters
----------
filename: str
Path to house prices dataset
Returns
-------
Design matrix and response vector (prices) - either as a single
DataFrame or a Tuple[DataFrame, Series]
"""
data = pd.read_csv(filename).dropna()
data.drop(data[(data["id"] == 0)].index, inplace=True)
features = ["bedrooms", "bathrooms", "sqft_living", "sqft_lot",
"waterfront", "view", "condition", "grade", "sqft_above",
"sqft_basement"]
floors = pd.get_dummies(data["floors"])
house_age = data["yr_built"] - pd.to_numeric(data["date"].astype(
str).apply(lambda x: x[:4]))
years_from_renovation = data["yr_renovated"] - pd.to_numeric(data[
"date"].astype(str).apply(lambda x: x[:4]))
last_renovation_or_built_year = pd.concat([house_age,
years_from_renovation],
axis=1).max(axis=1)
data["zipcode"] = (data["zipcode"] / 10).astype(int)
zipcodes = pd.get_dummies(data["zipcode"], prefix="zipcode-")
x = | pd.concat([floors, data[features], zipcodes], axis=1) | pandas.concat |
#-----------------------------------------------------------------
#-- Master Thesis - Model-Based Predictive Maintenance on FPGA
#--
#-- File : prediction.py
#-- Description : Model analysis module on test data
#--
#-- Author : <NAME>
#-- Master : MSE Mechatronics
#-- Date : 14.01.2022
#-----------------------------------------------------------------
import data_structure as ds
import pandas as pd
import configparser
from matplotlib import pyplot as plt
from pandas import DataFrame
from model_manipulation import Model
import numpy as np
import seaborn as sns
print("All libraries are loaded from prediction")
# prediction and create dataframe for storing information
def dataStoring(model, data_scaled, window, timestep, features, batch_size, one_step):
dfAnomaly = pd.DataFrame(index=np.arange(window), columns=['Expected', 'Reconstructed', 'Error'], dtype=np.float64)
# prediction using one input after the other (batch size of 1)
if one_step:
for i in range(window):
# make one-step forecast
X = data_scaled[i]
X = X.reshape(X.shape[0], timestep, features)
yhat = model.predict(X, batch_size=batch_size)
yhat = yhat[0,0]
# store forecast
dfAnomaly['Reconstructed'].iloc[i] = yhat
expected = data_scaled[i]
dfAnomaly['Expected'].iloc[i] = data_scaled[i]
print('DataNum=%d, Reconstructed=%f, Expected=%f' % (i+1, yhat, expected))
            # report performance
test_mae_loss = np.mean(np.abs(yhat-expected))
print('Test L1 loss: %.3f' % test_mae_loss)
dfAnomaly['Error'].iloc[i] = test_mae_loss
# prediction using several inputs at the same time (batch size greather than 1)
else:
X = data_scaled[0:window]
X = X.reshape(X.shape[0], timestep, features)
print("Running reconstruction of test data")
yhat = model.predict(X, batch_size=batch_size, verbose=1)
yhat = yhat.reshape(window, 1)
dfAnomaly['Reconstructed'] = yhat
dfAnomaly['Expected'] = data_scaled[0:window]
dfAnomaly['Error'] = np.abs(dfAnomaly['Reconstructed'].values - dfAnomaly['Expected'].values)
    # print a summary of the model
model.summary()
return dfAnomaly
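# --- Hedged usage sketch (illustrative only, not from the original thesis code) ---
# Shows how dataStoring might be driven end to end. The file names, the Keras
# loader and every hyper-parameter below are placeholders; in the project the
# model comes from model_manipulation.Model and the values from the config file.
if __name__ == "__main__":
    from tensorflow.keras.models import load_model  # assumed Keras backend

    model = load_model("autoencoder.h5")             # hypothetical model file
    data_scaled = np.load("test_scaled.npy")         # hypothetical pre-scaled data
    dfAnomaly = dataStoring(model, data_scaled, window=len(data_scaled),
                            timestep=1, features=1, batch_size=64, one_step=False)
    print(dfAnomaly.head())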
# plots of expected vs reconstructed/predicted values
def showResults(dfAnomaly, window, threshold):
plt.figure(figsize=(16, 9))
plt.plot(dfAnomaly['Expected'])
plt.plot(dfAnomaly['Reconstructed'])
plt.ylabel('Acceleration values (g)')
plt.xlabel('Time steps')
plt.legend(['Target', 'Reconstruction'])
plt.title('Reconstruction of acceleration values')
plt.show()
# summarize results
results = | DataFrame() | pandas.DataFrame |
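# --- Hedged sketch: flagging anomalies from the reconstruction error ---
# showResults receives a `threshold` argument; a natural follow-up step
# (assumed here, not shown in the excerpt) is to mark every sample whose
# reconstruction error exceeds that threshold as anomalous.
def flagAnomalies(dfAnomaly, threshold):
    flagged = dfAnomaly.copy()
    flagged['Anomaly'] = flagged['Error'] > threshold
    return flagged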
"""
Tests for simulation of time series
Author: <NAME>
License: Simplified-BSD
"""
import numpy as np
import pandas as pd
from numpy.testing import assert_, assert_allclose, assert_equal
import pytest
from scipy.signal import lfilter
from .test_impulse_responses import TVSS
from statsmodels.tools.sm_exceptions import SpecificationWarning, \
EstimationWarning
from statsmodels.tsa.statespace import (sarimax, structural, varmax,
dynamic_factor)
def test_arma_lfilter():
# Tests of an ARMA model simulation against scipy.signal.lfilter
# Note: the first elements of the generated SARIMAX datasets are based on
# the initial state, so we do not include them in the comparisons
np.random.seed(10239)
nobs = 100
eps = np.random.normal(size=nobs)
# AR(1)
mod = sarimax.SARIMAX([0], order=(1, 0, 0))
actual = mod.simulate([0.5, 1.], nobs + 1, state_shocks=np.r_[eps, 0],
initial_state=np.zeros(mod.k_states))
desired = lfilter([1], [1, -0.5], eps)
assert_allclose(actual[1:], desired)
# MA(1)
mod = sarimax.SARIMAX([0], order=(0, 0, 1))
actual = mod.simulate([0.5, 1.], nobs + 1, state_shocks=np.r_[eps, 0],
initial_state=np.zeros(mod.k_states))
desired = lfilter([1, 0.5], [1], eps)
assert_allclose(actual[1:], desired)
# ARMA(1, 1)
mod = sarimax.SARIMAX([0], order=(1, 0, 1))
actual = mod.simulate([0.5, 0.2, 1.], nobs + 1, state_shocks=np.r_[eps, 0],
initial_state=np.zeros(mod.k_states))
desired = lfilter([1, 0.2], [1, -0.5], eps)
assert_allclose(actual[1:], desired)
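# --- Hedged aside (not part of the statsmodels test suite) ---
# The tests in this file all lean on the same equivalence: an ARMA process
# simulated through the state space recursion matches the corresponding
# lfilter recursion, e.g. y_t = phi * y_{t-1} + eps_t for an AR(1), once the
# first simulated value (which only reflects the initial state) is dropped.
def _ar1_reference(phi, shocks):
    # lfilter with b=[1], a=[1, -phi] implements y[n] = shocks[n] + phi * y[n-1]
    return lfilter([1], [1, -phi], shocks)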
def test_arma_direct():
# Tests of an ARMA model simulation against direct construction
# This is useful for e.g. trend components
# Note: the first elements of the generated SARIMAX datasets are based on
# the initial state, so we do not include them in the comparisons
np.random.seed(10239)
nobs = 100
eps = np.random.normal(size=nobs)
exog = np.random.normal(size=nobs)
# AR(1)
mod = sarimax.SARIMAX([0], order=(1, 0, 0))
actual = mod.simulate([0.5, 1.], nobs + 1, state_shocks=np.r_[eps, 0],
initial_state=np.zeros(mod.k_states))
desired = np.zeros(nobs)
for i in range(nobs):
if i == 0:
desired[i] = eps[i]
else:
desired[i] = 0.5 * desired[i - 1] + eps[i]
assert_allclose(actual[1:], desired)
# MA(1)
mod = sarimax.SARIMAX([0], order=(0, 0, 1))
actual = mod.simulate([0.5, 1.], nobs + 1, state_shocks=np.r_[eps, 0],
initial_state=np.zeros(mod.k_states))
desired = np.zeros(nobs)
for i in range(nobs):
if i == 0:
desired[i] = eps[i]
else:
desired[i] = 0.5 * eps[i - 1] + eps[i]
assert_allclose(actual[1:], desired)
# ARMA(1, 1)
mod = sarimax.SARIMAX([0], order=(1, 0, 1))
actual = mod.simulate([0.5, 0.2, 1.], nobs + 1, state_shocks=np.r_[eps, 0],
initial_state=np.zeros(mod.k_states))
desired = np.zeros(nobs)
for i in range(nobs):
if i == 0:
desired[i] = eps[i]
else:
desired[i] = 0.5 * desired[i - 1] + 0.2 * eps[i - 1] + eps[i]
assert_allclose(actual[1:], desired)
# ARMA(1, 1) + intercept
mod = sarimax.SARIMAX([0], order=(1, 0, 1), trend='c')
actual = mod.simulate([1.3, 0.5, 0.2, 1.], nobs + 1,
state_shocks=np.r_[eps, 0],
initial_state=np.zeros(mod.k_states))
desired = np.zeros(nobs)
for i in range(nobs):
trend = 1.3
if i == 0:
desired[i] = trend + eps[i]
else:
desired[i] = (trend + 0.5 * desired[i - 1] +
0.2 * eps[i - 1] + eps[i])
assert_allclose(actual[1:], desired)
# ARMA(1, 1) + intercept + time trend
# Note: to allow time-varying SARIMAX to simulate 101 observations, need to
# give it 101 observations up front
mod = sarimax.SARIMAX(np.zeros(nobs + 1), order=(1, 0, 1), trend='ct')
actual = mod.simulate([1.3, 0.2, 0.5, 0.2, 1.], nobs + 1,
state_shocks=np.r_[eps, 0],
initial_state=np.zeros(mod.k_states))
desired = np.zeros(nobs)
for i in range(nobs):
trend = 1.3 + 0.2 * (i + 1)
if i == 0:
desired[i] = trend + eps[i]
else:
desired[i] = (trend + 0.5 * desired[i - 1] +
0.2 * eps[i - 1] + eps[i])
assert_allclose(actual[1:], desired)
# ARMA(1, 1) + intercept + time trend + exog
# Note: to allow time-varying SARIMAX to simulate 101 observations, need to
# give it 101 observations up front
# Note: the model is regression with SARIMAX errors, so the exog is
# introduced into the observation equation rather than the ARMA part
mod = sarimax.SARIMAX(np.zeros(nobs + 1), exog=np.r_[0, exog],
order=(1, 0, 1), trend='ct')
actual = mod.simulate([1.3, 0.2, -0.5, 0.5, 0.2, 1.], nobs + 1,
state_shocks=np.r_[eps, 0],
initial_state=np.zeros(mod.k_states))
desired = np.zeros(nobs)
for i in range(nobs):
trend = 1.3 + 0.2 * (i + 1)
if i == 0:
desired[i] = trend + eps[i]
else:
desired[i] = (trend + 0.5 * desired[i - 1] +
0.2 * eps[i - 1] + eps[i])
desired = desired - 0.5 * exog
assert_allclose(actual[1:], desired)
def test_structural():
np.random.seed(38947)
nobs = 100
eps = np.random.normal(size=nobs)
exog = np.random.normal(size=nobs)
eps1 = np.zeros(nobs)
eps2 = np.zeros(nobs)
eps2[49] = 1
eps3 = np.zeros(nobs)
eps3[50:] = 1
# AR(1)
mod1 = structural.UnobservedComponents([0], autoregressive=1)
mod2 = sarimax.SARIMAX([0], order=(1, 0, 0))
actual = mod1.simulate([1, 0.5], nobs, state_shocks=eps,
initial_state=np.zeros(mod1.k_states))
desired = mod2.simulate([0.5, 1], nobs, state_shocks=eps,
initial_state=np.zeros(mod2.k_states))
assert_allclose(actual, desired)
# ARX(1)
mod1 = structural.UnobservedComponents(np.zeros(nobs), exog=exog,
autoregressive=1)
mod2 = sarimax.SARIMAX(np.zeros(nobs), exog=exog, order=(1, 0, 0))
actual = mod1.simulate([1, 0.5, 0.2], nobs, state_shocks=eps,
initial_state=np.zeros(mod2.k_states))
desired = mod2.simulate([0.2, 0.5, 1], nobs, state_shocks=eps,
initial_state=np.zeros(mod2.k_states))
assert_allclose(actual, desired)
# Irregular
mod = structural.UnobservedComponents([0], 'irregular')
actual = mod.simulate([1.], nobs, measurement_shocks=eps,
initial_state=np.zeros(mod.k_states))
assert_allclose(actual, eps)
# Fixed intercept
# (in practice this is a deterministic constant, because an irregular
# component must be added)
warning = SpecificationWarning
match = 'irregular component added'
with pytest.warns(warning, match=match):
mod = structural.UnobservedComponents([0], 'fixed intercept')
actual = mod.simulate([1.], nobs, measurement_shocks=eps,
initial_state=[10])
assert_allclose(actual, 10 + eps)
# Deterministic constant
mod = structural.UnobservedComponents([0], 'deterministic constant')
actual = mod.simulate([1.], nobs, measurement_shocks=eps,
initial_state=[10])
assert_allclose(actual, 10 + eps)
# Local level
mod = structural.UnobservedComponents([0], 'local level')
actual = mod.simulate([1., 1.], nobs, measurement_shocks=eps,
state_shocks=eps2,
initial_state=np.zeros(mod.k_states))
assert_allclose(actual, eps + eps3)
# Random walk
mod = structural.UnobservedComponents([0], 'random walk')
actual = mod.simulate([1.], nobs, measurement_shocks=eps,
state_shocks=eps2,
initial_state=np.zeros(mod.k_states))
assert_allclose(actual, eps + eps3)
# Fixed slope
# (in practice this is a deterministic trend, because an irregular
# component must be added)
warning = SpecificationWarning
match = 'irregular component added'
with pytest.warns(warning, match=match):
mod = structural.UnobservedComponents([0], 'fixed slope')
actual = mod.simulate([1., 1.], nobs, measurement_shocks=eps,
state_shocks=eps2, initial_state=[0, 1])
assert_allclose(actual, eps + np.arange(100))
# Deterministic trend
mod = structural.UnobservedComponents([0], 'deterministic trend')
actual = mod.simulate([1.], nobs, measurement_shocks=eps,
state_shocks=eps2, initial_state=[0, 1])
assert_allclose(actual, eps + np.arange(100))
# Local linear deterministic trend
mod = structural.UnobservedComponents(
[0], 'local linear deterministic trend')
actual = mod.simulate([1., 1.], nobs, measurement_shocks=eps,
state_shocks=eps2, initial_state=[0, 1])
desired = eps + np.r_[np.arange(50), 1 + np.arange(50, 100)]
assert_allclose(actual, desired)
# Random walk with drift
mod = structural.UnobservedComponents([0], 'random walk with drift')
actual = mod.simulate([1.], nobs, state_shocks=eps2,
initial_state=[0, 1])
desired = np.r_[np.arange(50), 1 + np.arange(50, 100)]
assert_allclose(actual, desired)
# Local linear trend
mod = structural.UnobservedComponents([0], 'local linear trend')
actual = mod.simulate([1., 1., 1.], nobs, measurement_shocks=eps,
state_shocks=np.c_[eps2, eps1], initial_state=[0, 1])
desired = eps + np.r_[np.arange(50), 1 + np.arange(50, 100)]
assert_allclose(actual, desired)
actual = mod.simulate([1., 1., 1.], nobs, measurement_shocks=eps,
state_shocks=np.c_[eps1, eps2], initial_state=[0, 1])
desired = eps + np.r_[np.arange(50), np.arange(50, 150, 2)]
assert_allclose(actual, desired)
# Smooth trend
mod = structural.UnobservedComponents([0], 'smooth trend')
actual = mod.simulate([1., 1.], nobs, measurement_shocks=eps,
state_shocks=eps1, initial_state=[0, 1])
desired = eps + np.r_[np.arange(100)]
assert_allclose(actual, desired)
actual = mod.simulate([1., 1.], nobs, measurement_shocks=eps,
state_shocks=eps2, initial_state=[0, 1])
desired = eps + np.r_[np.arange(50), np.arange(50, 150, 2)]
assert_allclose(actual, desired)
# Random trend
mod = structural.UnobservedComponents([0], 'random trend')
actual = mod.simulate([1., 1.], nobs,
state_shocks=eps1, initial_state=[0, 1])
desired = np.r_[np.arange(100)]
assert_allclose(actual, desired)
actual = mod.simulate([1., 1.], nobs,
state_shocks=eps2, initial_state=[0, 1])
desired = np.r_[np.arange(50), np.arange(50, 150, 2)]
assert_allclose(actual, desired)
# Seasonal (deterministic)
mod = structural.UnobservedComponents([0], 'irregular', seasonal=2,
stochastic_seasonal=False)
actual = mod.simulate([1.], nobs, measurement_shocks=eps,
initial_state=[10])
desired = eps + np.tile([10, -10], 50)
assert_allclose(actual, desired)
# Seasonal (stochastic)
mod = structural.UnobservedComponents([0], 'irregular', seasonal=2)
actual = mod.simulate([1., 1.], nobs, measurement_shocks=eps,
state_shocks=eps2, initial_state=[10])
desired = eps + np.r_[np.tile([10, -10], 25), np.tile([11, -11], 25)]
assert_allclose(actual, desired)
# Cycle (deterministic)
mod = structural.UnobservedComponents([0], 'irregular', cycle=True)
actual = mod.simulate([1., 1.2], nobs, measurement_shocks=eps,
initial_state=[1, 0])
x1 = [np.cos(1.2), np.sin(1.2)]
x2 = [-np.sin(1.2), np.cos(1.2)]
T = np.array([x1, x2])
desired = eps
states = [1, 0]
for i in range(nobs):
desired[i] += states[0]
states = np.dot(T, states)
assert_allclose(actual, desired)
# Cycle (stochastic)
mod = structural.UnobservedComponents([0], 'irregular', cycle=True,
stochastic_cycle=True)
actual = mod.simulate([1., 1., 1.2], nobs, measurement_shocks=eps,
state_shocks=np.c_[eps2, eps2], initial_state=[1, 0])
x1 = [np.cos(1.2), np.sin(1.2)]
x2 = [-np.sin(1.2), np.cos(1.2)]
T = np.array([x1, x2])
desired = eps
states = [1, 0]
for i in range(nobs):
desired[i] += states[0]
states = np.dot(T, states) + eps2[i]
assert_allclose(actual, desired)
def test_varmax():
np.random.seed(371934)
nobs = 100
eps = np.random.normal(size=nobs)
exog = np.random.normal(size=(nobs, 1))
eps1 = np.zeros(nobs)
eps2 = np.zeros(nobs)
eps2[49] = 1
eps3 = np.zeros(nobs)
eps3[50:] = 1
# VAR(2) - single series
mod1 = varmax.VARMAX([[0]], order=(2, 0), trend='n')
mod2 = sarimax.SARIMAX([0], order=(2, 0, 0))
actual = mod1.simulate([0.5, 0.2, 1], nobs, state_shocks=eps,
initial_state=np.zeros(mod1.k_states))
desired = mod2.simulate([0.5, 0.2, 1], nobs, state_shocks=eps,
initial_state=np.zeros(mod2.k_states))
assert_allclose(actual, desired)
# VMA(2) - single series
mod1 = varmax.VARMAX([[0]], order=(0, 2), trend='n')
mod2 = sarimax.SARIMAX([0], order=(0, 0, 2))
actual = mod1.simulate([0.5, 0.2, 1], nobs, state_shocks=eps,
initial_state=np.zeros(mod1.k_states))
desired = mod2.simulate([0.5, 0.2, 1], nobs, state_shocks=eps,
initial_state=np.zeros(mod2.k_states))
assert_allclose(actual, desired)
# VARMA(2, 2) - single series
warning = EstimationWarning
match = r'VARMA\(p,q\) models is not'
with pytest.warns(warning, match=match):
mod1 = varmax.VARMAX([[0]], order=(2, 2), trend='n')
mod2 = sarimax.SARIMAX([0], order=(2, 0, 2))
actual = mod1.simulate([0.5, 0.2, 0.1, -0.2, 1], nobs, state_shocks=eps,
initial_state=np.zeros(mod1.k_states))
desired = mod2.simulate([0.5, 0.2, 0.1, -0.2, 1], nobs, state_shocks=eps,
initial_state=np.zeros(mod2.k_states))
assert_allclose(actual, desired)
# VARMA(2, 2) + trend - single series
warning = EstimationWarning
match = r'VARMA\(p,q\) models is not'
with pytest.warns(warning, match=match):
mod1 = varmax.VARMAX([[0]], order=(2, 2), trend='c')
mod2 = sarimax.SARIMAX([0], order=(2, 0, 2), trend='c')
actual = mod1.simulate([10, 0.5, 0.2, 0.1, -0.2, 1], nobs,
state_shocks=eps,
initial_state=np.zeros(mod1.k_states))
desired = mod2.simulate([10, 0.5, 0.2, 0.1, -0.2, 1], nobs,
state_shocks=eps,
initial_state=np.zeros(mod2.k_states))
assert_allclose(actual, desired)
# VAR(1)
transition = np.array([[0.5, 0.1],
[-0.1, 0.2]])
mod = varmax.VARMAX([[0, 0]], order=(1, 0), trend='n')
actual = mod.simulate(np.r_[transition.ravel(), 1., 0, 1.], nobs,
state_shocks=np.c_[eps1, eps1],
initial_state=np.zeros(mod.k_states))
assert_allclose(actual, 0)
actual = mod.simulate(np.r_[transition.ravel(), 1., 0, 1.], nobs,
state_shocks=np.c_[eps1, eps1], initial_state=[1, 1])
desired = np.zeros((nobs, 2))
state = np.r_[1, 1]
for i in range(nobs):
desired[i] = state
state = np.dot(transition, state)
assert_allclose(actual, desired)
# VAR(1) + measurement error
mod = varmax.VARMAX([[0, 0]], order=(1, 0), trend='n',
measurement_error=True)
actual = mod.simulate(np.r_[transition.ravel(), 1., 0, 1., 1., 1.], nobs,
measurement_shocks=np.c_[eps, eps],
state_shocks=np.c_[eps1, eps1],
initial_state=np.zeros(mod.k_states))
assert_allclose(actual, np.c_[eps, eps])
# VARX(1)
mod = varmax.VARMAX(np.zeros((nobs, 2)), order=(1, 0), trend='n',
exog=exog)
actual = mod.simulate(np.r_[transition.ravel(), 5, -2, 1., 0, 1.], nobs,
state_shocks=np.c_[eps1, eps1], initial_state=[1, 1])
desired = np.zeros((nobs, 2))
state = np.r_[1, 1]
for i in range(nobs):
desired[i] = state
if i < nobs - 1:
state = exog[i + 1] * [5, -2] + np.dot(transition, state)
assert_allclose(actual, desired)
# VMA(1)
# TODO: This is just a smoke test
mod = varmax.VARMAX(
np.random.normal(size=(nobs, 2)), order=(0, 1), trend='n')
mod.simulate(mod.start_params, nobs)
# VARMA(2, 2) + trend + exog
# TODO: This is just a smoke test
warning = EstimationWarning
match = r"VARMA\(p,q\) models is not"
with pytest.warns(warning, match=match):
mod = varmax.VARMAX(
np.random.normal(size=(nobs, 2)), order=(2, 2), trend='c',
exog=exog)
mod.simulate(mod.start_params, nobs)
def test_dynamic_factor():
np.random.seed(93739)
nobs = 100
eps = np.random.normal(size=nobs)
exog = np.random.normal(size=(nobs, 1))
eps1 = np.zeros(nobs)
eps2 = np.zeros(nobs)
eps2[49] = 1
eps3 = np.zeros(nobs)
eps3[50:] = 1
# DFM: 2 series, AR(2) factor
mod1 = dynamic_factor.DynamicFactor([[0, 0]], k_factors=1, factor_order=2)
mod2 = sarimax.SARIMAX([0], order=(2, 0, 0))
actual = mod1.simulate([-0.9, 0.8, 1., 1., 0.5, 0.2], nobs,
measurement_shocks=np.c_[eps1, eps1],
state_shocks=eps,
initial_state=np.zeros(mod1.k_states))
desired = mod2.simulate([0.5, 0.2, 1], nobs, state_shocks=eps,
initial_state=np.zeros(mod2.k_states))
assert_allclose(actual[:, 0], -0.9 * desired)
assert_allclose(actual[:, 1], 0.8 * desired)
# DFM: 2 series, AR(2) factor, exog
mod1 = dynamic_factor.DynamicFactor(np.zeros((nobs, 2)), k_factors=1,
factor_order=2, exog=exog)
mod2 = sarimax.SARIMAX([0], order=(2, 0, 0))
actual = mod1.simulate([-0.9, 0.8, 5, -2, 1., 1., 0.5, 0.2], nobs,
measurement_shocks=np.c_[eps1, eps1],
state_shocks=eps,
initial_state=np.zeros(mod1.k_states))
desired = mod2.simulate([0.5, 0.2, 1], nobs, state_shocks=eps,
initial_state=np.zeros(mod2.k_states))
assert_allclose(actual[:, 0], -0.9 * desired + 5 * exog[:, 0])
assert_allclose(actual[:, 1], 0.8 * desired - 2 * exog[:, 0])
# DFM, 3 series, VAR(2) factor, exog, error VAR
# TODO: This is just a smoke test
mod = dynamic_factor.DynamicFactor(np.random.normal(size=(nobs, 3)),
k_factors=2, factor_order=2, exog=exog,
error_order=2, error_var=True)
mod.simulate(mod.start_params, nobs)
def test_known_initialization():
# Need to test that "known" initialization is taken into account in
# time series simulation
np.random.seed(38947)
nobs = 100
eps = np.random.normal(size=nobs)
eps1 = np.zeros(nobs)
eps2 = np.zeros(nobs)
eps2[49] = 1
eps3 = np.zeros(nobs)
eps3[50:] = 1
# SARIMAX
# (test that when state shocks are shut down, the initial state
# geometrically declines according to the AR parameter)
mod = sarimax.SARIMAX([0], order=(1, 0, 0))
mod.ssm.initialize_known([100], [[0]])
actual = mod.simulate([0.5, 1.], nobs, state_shocks=eps1)
assert_allclose(actual, 100 * 0.5**np.arange(nobs))
# Unobserved components
# (test that the initial level shifts the entire path)
mod = structural.UnobservedComponents([0], 'local level')
mod.ssm.initialize_known([100], [[0]])
actual = mod.simulate([1., 1.], nobs, measurement_shocks=eps,
state_shocks=eps2)
assert_allclose(actual, 100 + eps + eps3)
# VARMAX
# (here just test that with an independent VAR we have each initial state
# geometrically declining at the appropriate rate)
transition = np.diag([0.5, 0.2])
mod = varmax.VARMAX([[0, 0]], order=(1, 0), trend='n')
mod.initialize_known([100, 50], np.diag([0, 0]))
actual = mod.simulate(np.r_[transition.ravel(), 1., 0, 1.], nobs,
measurement_shocks=np.c_[eps1, eps1],
state_shocks=np.c_[eps1, eps1])
assert_allclose(actual, np.c_[100 * 0.5**np.arange(nobs),
50 * 0.2**np.arange(nobs)])
# Dynamic factor
# (test that the initial state declines geometrically and then loads
# correctly onto the series)
mod = dynamic_factor.DynamicFactor([[0, 0]], k_factors=1, factor_order=1)
mod.initialize_known([100], [[0]])
actual = mod.simulate([0.8, 0.2, 1.0, 1.0, 0.5], nobs,
measurement_shocks=np.c_[eps1, eps1],
state_shocks=eps1)
tmp = 100 * 0.5**np.arange(nobs)
assert_allclose(actual, np.c_[0.8 * tmp, 0.2 * tmp])
def test_sequential_simulate():
# Test that we can perform simulation, change the system matrices, and then
# perform simulation again (i.e. check that everything updates correctly
# in the simulation smoother).
n_simulations = 100
mod = sarimax.SARIMAX([1], order=(0, 0, 0), trend='c')
actual = mod.simulate([1, 0], n_simulations)
assert_allclose(actual, np.ones(n_simulations))
actual = mod.simulate([10, 0], n_simulations)
assert_allclose(actual, np.ones(n_simulations) * 10)
def test_sarimax_end_time_invariant_noshocks():
# Test simulating values from the end of a time-invariant SARIMAX model
# In this test, we suppress randomness by setting the shocks to zeros
endog = np.arange(1, 11)
mod = sarimax.SARIMAX(endog)
res = mod.filter([0.5, 1.])
nsimulations = 10
measurement_shocks = np.zeros((nsimulations, mod.k_endog))
state_shocks = np.zeros((nsimulations, mod.k_states))
initial_state = res.predicted_state[..., -1]
assert_allclose(initial_state, 5)
actual = res.simulate(nsimulations, anchor='end',
measurement_shocks=measurement_shocks,
state_shocks=state_shocks,
initial_state=initial_state)
# Compute the desired simulated values directly
desired = 10 * 0.5**np.arange(1, nsimulations + 1)
assert_allclose(actual, desired)
# Test using the model versus the results class
mod_actual = mod.simulate(
res.params, nsimulations, anchor='end',
measurement_shocks=measurement_shocks,
state_shocks=state_shocks,
initial_state=initial_state)
assert_allclose(mod_actual, desired)
# Alternatively, since we've shut down the shocks, we can compare against
# the forecast values
assert_allclose(actual, res.forecast(nsimulations))
def test_sarimax_simple_differencing_end_time_invariant_noshocks():
# Test simulating values from the end of a time-invariant SARIMAX model
# in which simple differencing is used.
# In this test, we suppress randomness by setting the shocks to zeros
endog = np.cumsum(np.arange(0, 11))
mod = sarimax.SARIMAX(endog, order=(1, 1, 0), simple_differencing=True)
res = mod.filter([0.5, 1.])
nsimulations = 10
measurement_shocks = np.zeros((nsimulations, mod.k_endog))
state_shocks = np.zeros((nsimulations, mod.k_states))
initial_state = res.predicted_state[..., -1]
assert_allclose(initial_state, 5)
actual = res.simulate(nsimulations, anchor='end',
measurement_shocks=measurement_shocks,
state_shocks=state_shocks,
initial_state=initial_state)
# Compute the desired simulated values directly
desired = 10 * 0.5**np.arange(1, nsimulations + 1)
assert_allclose(actual, desired)
# Test using the model versus the results class
mod_actual = mod.simulate(
res.params, nsimulations, anchor='end',
measurement_shocks=measurement_shocks,
state_shocks=state_shocks,
initial_state=initial_state)
assert_allclose(mod_actual, desired)
# Alternatively, since we've shut down the shocks, we can compare against
# the forecast values
assert_allclose(actual, res.forecast(nsimulations))
def test_sarimax_time_invariant_shocks(reset_randomstate):
# Test simulating values from the end of a time-invariant SARIMAX model,
# with nonzero shocks
endog = np.arange(1, 11)
mod = sarimax.SARIMAX(endog)
res = mod.filter([0.5, 1.])
nsimulations = 10
measurement_shocks = np.random.normal(size=nsimulations)
state_shocks = np.random.normal(size=nsimulations)
initial_state = res.predicted_state[:1, -1]
actual = res.simulate(nsimulations, anchor='end',
measurement_shocks=measurement_shocks,
state_shocks=state_shocks,
initial_state=initial_state)
desired = (
lfilter([1], [1, -0.5], np.r_[initial_state, state_shocks])[:-1] +
measurement_shocks)
assert_allclose(actual, desired)
# Test using the model versus the results class
mod_actual = mod.simulate(
res.params, nsimulations, anchor='end',
measurement_shocks=measurement_shocks,
state_shocks=state_shocks,
initial_state=initial_state)
assert_allclose(mod_actual, desired)
def test_sarimax_simple_differencing_end_time_invariant_shocks():
# Test simulating values from the end of a time-invariant SARIMAX model
# in which simple differencing is used.
# In this test, we suppress randomness by setting the shocks to zeros
endog = np.cumsum(np.arange(0, 11))
mod = sarimax.SARIMAX(endog, order=(1, 1, 0), simple_differencing=True)
res = mod.filter([0.5, 1.])
nsimulations = 10
measurement_shocks = np.random.normal(size=nsimulations)
state_shocks = np.random.normal(size=nsimulations)
initial_state = res.predicted_state[:1, -1]
actual = res.simulate(nsimulations, anchor='end',
measurement_shocks=measurement_shocks,
state_shocks=state_shocks,
initial_state=initial_state)
desired = (
lfilter([1], [1, -0.5], np.r_[initial_state, state_shocks])[:-1] +
measurement_shocks)
assert_allclose(actual, desired)
# Test using the model versus the results class
mod_actual = mod.simulate(
res.params, nsimulations, anchor='end',
measurement_shocks=measurement_shocks,
state_shocks=state_shocks,
initial_state=initial_state)
assert_allclose(mod_actual, desired)
def test_sarimax_time_varying_trend_noshocks():
# Test simulating values from the end of a time-varying SARIMAX model
# In this test, we suppress randomness by setting the shocks to zeros
endog = np.arange(1, 11)
mod = sarimax.SARIMAX(endog, trend='t')
res = mod.filter([1., 0.2, 1.])
nsimulations = 10
measurement_shocks = np.zeros((nsimulations, mod.k_endog))
state_shocks = np.zeros((nsimulations, mod.k_states))
initial_state = res.predicted_state[..., -1]
assert_allclose(initial_state, 12)
actual = res.simulate(nsimulations, anchor='end',
measurement_shocks=measurement_shocks,
state_shocks=state_shocks,
initial_state=initial_state)
# Compute the desired simulated values directly
desired = lfilter([1], [1, -0.2], np.r_[12, np.arange(11, 20)])
assert_allclose(actual, desired)
# Test using the model versus the results class
mod_actual = mod.simulate(
res.params, nsimulations, anchor='end',
measurement_shocks=measurement_shocks,
state_shocks=state_shocks,
initial_state=initial_state)
assert_allclose(mod_actual, desired)
# Alternatively, since we've shut down the shocks, we can compare against
# the forecast values
assert_allclose(actual, res.forecast(nsimulations))
def test_sarimax_simple_differencing_time_varying_trend_noshocks():
# Test simulating values from the end of a time-varying SARIMAX model
# in which simple differencing is used.
# In this test, we suppress randomness by setting the shocks to zeros
endog = np.cumsum(np.arange(0, 11))
mod = sarimax.SARIMAX(endog, order=(1, 1, 0), trend='t',
simple_differencing=True)
res = mod.filter([1., 0.2, 1.])
nsimulations = 10
measurement_shocks = np.zeros((nsimulations, mod.k_endog))
state_shocks = np.zeros((nsimulations, mod.k_states))
initial_state = res.predicted_state[..., -1]
assert_allclose(initial_state, 12)
actual = res.simulate(nsimulations, anchor='end',
measurement_shocks=measurement_shocks,
state_shocks=state_shocks,
initial_state=initial_state)
# Compute the desired simulated values directly
desired = lfilter([1], [1, -0.2], np.r_[12, np.arange(11, 20)])
assert_allclose(actual, desired)
# Test using the model versus the results class
mod_actual = mod.simulate(
res.params, nsimulations, anchor='end',
measurement_shocks=measurement_shocks,
state_shocks=state_shocks,
initial_state=initial_state)
assert_allclose(mod_actual, desired)
# Alternatively, since we've shut down the shocks, we can compare against
# the forecast values
assert_allclose(actual, res.forecast(nsimulations))
def test_sarimax_time_varying_trend_shocks(reset_randomstate):
# Test simulating values from the end of a time-varying SARIMAX model,
# with nonzero shocks
endog = np.arange(1, 11)
mod = sarimax.SARIMAX(endog, trend='t')
res = mod.filter([1., 0.2, 1.])
nsimulations = 10
measurement_shocks = np.random.normal(size=nsimulations)
state_shocks = np.random.normal(size=nsimulations)
initial_state = res.predicted_state[:1, -1]
actual = res.simulate(nsimulations, anchor='end',
measurement_shocks=measurement_shocks,
state_shocks=state_shocks,
initial_state=initial_state)
x = np.r_[initial_state, state_shocks + np.arange(11, 21)]
desired = lfilter([1], [1, -0.2], x)[:-1] + measurement_shocks
assert_allclose(actual, desired)
# Test using the model versus the results class
mod_actual = mod.simulate(
res.params, nsimulations, anchor='end',
measurement_shocks=measurement_shocks,
state_shocks=state_shocks,
initial_state=initial_state)
assert_allclose(mod_actual, desired)
def test_sarimax_simple_differencing_time_varying_trend_shocks(
reset_randomstate):
# Test simulating values from the end of a time-varying SARIMAX model
# in which simple differencing is used.
# with nonzero shocks
endog = np.cumsum(np.arange(0, 11))
mod = sarimax.SARIMAX(endog, order=(1, 1, 0), trend='t',
simple_differencing=True)
res = mod.filter([1., 0.2, 1.])
nsimulations = 10
measurement_shocks = np.random.normal(size=nsimulations)
state_shocks = np.random.normal(size=nsimulations)
initial_state = res.predicted_state[:1, -1]
assert_allclose(initial_state, 12)
actual = res.simulate(nsimulations, anchor='end',
measurement_shocks=measurement_shocks,
state_shocks=state_shocks,
initial_state=initial_state)
x = np.r_[initial_state, state_shocks + np.arange(11, 21)]
desired = lfilter([1], [1, -0.2], x)[:-1] + measurement_shocks
assert_allclose(actual, desired)
# Test using the model versus the results class
mod_actual = mod.simulate(
res.params, nsimulations, anchor='end',
measurement_shocks=measurement_shocks,
state_shocks=state_shocks,
initial_state=initial_state)
assert_allclose(mod_actual, desired)
def test_sarimax_time_varying_exog_noshocks():
# Test simulating values from the end of a time-varying SARIMAX model
# In this test, we suppress randomness by setting the shocks to zeros
# Note that `exog` here has basically the same effect as measurement shocks
endog = np.arange(1, 11)
exog = np.arange(1, 21)**2
mod = sarimax.SARIMAX(endog, exog=exog[:10])
res = mod.filter([1., 0.2, 1.])
nsimulations = 10
measurement_shocks = np.zeros((nsimulations, mod.k_endog))
state_shocks = np.zeros((nsimulations, mod.k_states))
initial_state = res.predicted_state[..., -1]
actual = res.simulate(nsimulations, exog=exog[10:], anchor='end',
measurement_shocks=measurement_shocks,
state_shocks=state_shocks,
initial_state=initial_state)
# Compute the desired simulated values directly
desired = (lfilter([1], [1, -0.2], np.r_[initial_state, [0] * 9]) +
exog[10:])
assert_allclose(actual, desired)
# Test using the model versus the results class
mod_actual = mod.simulate(
res.params, nsimulations, exog=exog[10:], anchor='end',
measurement_shocks=measurement_shocks,
state_shocks=state_shocks,
initial_state=initial_state)
assert_allclose(mod_actual, desired)
# Alternatively, since we've shut down the shocks, we can compare against
# the forecast values
assert_allclose(actual, res.forecast(nsimulations, exog=exog[10:]))
def test_sarimax_simple_differencing_time_varying_exog_noshocks():
# Test simulating values from the end of a time-varying SARIMAX model
# with simple differencing
# In this test, we suppress randomness by setting the shocks to zeros
endog = np.cumsum(np.arange(0, 11))
exog = np.cumsum(np.arange(0, 21)**2)
mod = sarimax.SARIMAX(endog, order=(1, 1, 0), exog=exog[:11],
simple_differencing=True)
res = mod.filter([1., 0.2, 1.])
nsimulations = 10
measurement_shocks = np.zeros((nsimulations, mod.k_endog))
state_shocks = np.zeros((nsimulations, mod.k_states))
initial_state = res.predicted_state[..., -1]
actual = res.simulate(nsimulations, exog=exog[11:], anchor='end',
measurement_shocks=measurement_shocks,
state_shocks=state_shocks,
initial_state=initial_state)
# Compute the desired simulated values directly
desired = (lfilter([1], [1, -0.2], np.r_[initial_state, [0] * 9]) +
np.diff(exog)[10:])
assert_allclose(actual, desired)
# Test using the model versus the results class
mod_actual = mod.simulate(
res.params, nsimulations, exog=exog[11:], anchor='end',
measurement_shocks=measurement_shocks,
state_shocks=state_shocks,
initial_state=initial_state)
assert_allclose(mod_actual, desired)
# Alternatively, since we've shut down the shocks, we can compare against
# the forecast values
assert_allclose(actual, res.forecast(nsimulations, exog=exog[11:]))
def test_sarimax_time_varying_exog_shocks(reset_randomstate):
# Test simulating values from the end of a time-varying SARIMAX model,
# with nonzero shocks
endog = np.arange(1, 11)
exog = np.arange(1, 21)**2
mod = sarimax.SARIMAX(endog, exog=exog[:10])
res = mod.filter([1., 0.2, 1.])
nsimulations = 10
measurement_shocks = np.random.normal(size=nsimulations)
state_shocks = np.random.normal(size=nsimulations)
initial_state = res.predicted_state[:1, -1]
actual = res.simulate(nsimulations, exog=exog[10:], anchor='end',
measurement_shocks=measurement_shocks,
state_shocks=state_shocks,
initial_state=initial_state)
x = np.r_[initial_state, state_shocks[:-1]]
desired = lfilter([1], [1, -0.2], x) + exog[10:] + measurement_shocks
assert_allclose(actual, desired)
# Test using the model versus the results class
mod_actual = mod.simulate(
res.params, nsimulations, exog=exog[10:], anchor='end',
measurement_shocks=measurement_shocks,
state_shocks=state_shocks,
initial_state=initial_state)
assert_allclose(mod_actual, desired)
def test_sarimax_simple_differencing_time_varying_exog_shocks(
reset_randomstate):
# Test simulating values from the end of a time-varying SARIMAX model
# Note that `exog` here has basically the same effect as measurement shocks
endog = np.cumsum(np.arange(0, 11))
exog = np.cumsum(np.arange(0, 21)**2)
mod = sarimax.SARIMAX(endog, order=(1, 1, 0), exog=exog[:11],
simple_differencing=True)
res = mod.filter([1., 0.2, 1.])
nsimulations = 10
measurement_shocks = np.random.normal(size=nsimulations)
state_shocks = np.random.normal(size=nsimulations)
initial_state = res.predicted_state[:1, -1]
actual = res.simulate(nsimulations, exog=exog[11:], anchor='end',
measurement_shocks=measurement_shocks,
state_shocks=state_shocks,
initial_state=initial_state)
# Compute the desired simulated values directly
x = np.r_[initial_state, state_shocks[:-1]]
desired = (lfilter([1], [1, -0.2], x) + np.diff(exog)[10:] +
measurement_shocks)
assert_allclose(actual, desired)
# Test using the model versus the results class
mod_actual = mod.simulate(
res.params, nsimulations, exog=exog[11:], anchor='end',
measurement_shocks=measurement_shocks,
state_shocks=state_shocks,
initial_state=initial_state)
assert_allclose(mod_actual, desired)
def test_unobserved_components_end_time_invariant_noshocks():
# Test simulating values from the end of a time-invariant
# UnobservedComponents model
# In this test, we suppress randomness by setting the shocks to zeros
endog = np.arange(1, 11)
mod = structural.UnobservedComponents(endog, 'llevel')
res = mod.filter([1., 1.])
nsimulations = 10
measurement_shocks = np.zeros((nsimulations, mod.k_endog))
state_shocks = np.zeros((nsimulations, mod.k_states))
initial_state = res.predicted_state[..., -1]
actual = res.simulate(nsimulations, anchor='end',
measurement_shocks=measurement_shocks,
state_shocks=state_shocks,
initial_state=initial_state)
# The mean of the simulated local level values is just the last value
desired = initial_state[0]
assert_allclose(actual, desired)
# Test using the model versus the results class
mod_actual = mod.simulate(
res.params, nsimulations, anchor='end',
measurement_shocks=measurement_shocks,
state_shocks=state_shocks,
initial_state=initial_state)
assert_allclose(mod_actual, desired)
# Alternatively, since we've shut down the shocks, we can compare against
# the forecast values
assert_allclose(actual, res.forecast(nsimulations))
def test_unobserved_components_end_time_invariant_shocks(reset_randomstate):
# Test simulating values from the end of a time-invariant
# UnobservedComponents model, with nonzero shocks
endog = np.arange(1, 11)
mod = structural.UnobservedComponents(endog, 'llevel')
res = mod.filter([1., 1.])
nsimulations = 10
measurement_shocks = np.random.normal(size=nsimulations)
state_shocks = np.random.normal(size=nsimulations)
initial_state = res.predicted_state[:1, -1]
actual = res.simulate(nsimulations, anchor='end',
measurement_shocks=measurement_shocks,
state_shocks=state_shocks,
initial_state=initial_state)
desired = (initial_state + np.cumsum(np.r_[0, state_shocks[:-1]]) +
measurement_shocks)
assert_allclose(actual, desired)
# Test using the model versus the results class
mod_actual = mod.simulate(
res.params, nsimulations, anchor='end',
measurement_shocks=measurement_shocks,
state_shocks=state_shocks,
initial_state=initial_state)
assert_allclose(mod_actual, desired)
def test_unobserved_components_end_time_varying_exog_noshocks():
# Test simulating values from the end of a time-varying
# UnobservedComponents model with exog
# In this test, we suppress randomness by setting the shocks to zeros
endog = np.arange(1, 11)
exog = np.arange(1, 21)**2
mod = structural.UnobservedComponents(endog, 'llevel', exog=exog[:10])
res = mod.filter([1., 1., 1.])
nsimulations = 10
measurement_shocks = np.zeros((nsimulations, mod.k_endog))
state_shocks = np.zeros((nsimulations, mod.k_states))
initial_state = res.predicted_state[..., -1]
actual = res.simulate(nsimulations, exog=exog[10:], anchor='end',
measurement_shocks=measurement_shocks,
state_shocks=state_shocks,
initial_state=initial_state)
# The mean of the simulated local level values is just the last value
desired = initial_state[0] + exog[10:]
assert_allclose(actual, desired)
# Test using the model versus the results class
mod_actual = mod.simulate(
res.params, nsimulations, exog=exog[10:], anchor='end',
measurement_shocks=measurement_shocks,
state_shocks=state_shocks,
initial_state=initial_state)
assert_allclose(mod_actual, desired)
# Alternatively, since we've shut down the shocks, we can compare against
# the forecast values
assert_allclose(actual, res.forecast(nsimulations, exog=exog[10:]))
def test_unobserved_components_end_time_varying_exog_shocks(reset_randomstate):
# Test simulating values from the end of a time-varying
# UnobservedComponents model with exog
endog = np.arange(1, 11)
exog = np.arange(1, 21)**2
mod = structural.UnobservedComponents(endog, 'llevel', exog=exog[:10])
res = mod.filter([1., 1., 1.])
nsimulations = 10
measurement_shocks = np.random.normal(size=nsimulations)
state_shocks = np.random.normal(size=nsimulations)
initial_state = res.predicted_state[:1, -1]
actual = res.simulate(nsimulations, exog=exog[10:], anchor='end',
measurement_shocks=measurement_shocks,
state_shocks=state_shocks,
initial_state=initial_state)
desired = (initial_state + np.cumsum(np.r_[0, state_shocks[:-1]]) +
measurement_shocks + exog[10:])
assert_allclose(actual, desired)
# Test using the model versus the results class
mod_actual = mod.simulate(
res.params, nsimulations, exog=exog[10:], anchor='end',
measurement_shocks=measurement_shocks,
state_shocks=state_shocks,
initial_state=initial_state)
assert_allclose(mod_actual, desired)
def test_varmax_end_time_invariant_noshocks():
# Test simulating values from the end of a time-invariant VARMAX model
# In this test, we suppress randomness by setting the shocks to zeros
endog = np.arange(1, 21).reshape(10, 2)
mod = varmax.VARMAX(endog, trend='n')
res = mod.filter([1., 1., 1., 1., 1., 0.5, 1.])
nsimulations = 10
measurement_shocks = np.zeros((nsimulations, mod.k_endog))
state_shocks = np.zeros((nsimulations, mod.k_states))
initial_state = res.predicted_state[:, -1]
actual = res.simulate(nsimulations, anchor='end',
measurement_shocks=measurement_shocks,
state_shocks=state_shocks,
initial_state=initial_state)
desired = (initial_state[:, None] * 2 ** np.arange(10)).T
assert_allclose(actual, desired)
# Test using the model versus the results class
mod_actual = mod.simulate(
res.params, nsimulations, anchor='end',
measurement_shocks=measurement_shocks,
state_shocks=state_shocks,
initial_state=initial_state)
assert_allclose(mod_actual, desired)
# Alternatively, since we've shut down the shocks, we can compare against
# the forecast values
assert_allclose(actual, res.forecast(nsimulations))
def test_varmax_end_time_invariant_shocks(reset_randomstate):
# Test simulating values from the end of a time-invariant VARMAX model,
# with nonzero shocks
endog = np.arange(1, 21).reshape(10, 2)
mod = varmax.VARMAX(endog, trend='n')
res = mod.filter([1., 1., 1., 1., 1., 0.5, 1.])
nsimulations = 10
measurement_shocks = np.random.normal(size=(nsimulations, mod.k_endog))
state_shocks = np.random.normal(size=(nsimulations, mod.k_states))
initial_state = res.predicted_state[:, -1]
actual = res.simulate(nsimulations, anchor='end',
measurement_shocks=measurement_shocks,
state_shocks=state_shocks,
initial_state=initial_state)
desired = np.zeros((nsimulations, mod.k_endog))
desired[0] = initial_state
for i in range(1, nsimulations):
desired[i] = desired[i - 1].sum() + state_shocks[i - 1]
desired = desired + measurement_shocks
assert_allclose(actual, desired)
# Test using the model versus the results class
mod_actual = mod.simulate(
res.params, nsimulations, anchor='end',
measurement_shocks=measurement_shocks,
state_shocks=state_shocks,
initial_state=initial_state)
assert_allclose(mod_actual, desired)
def test_varmax_end_time_varying_trend_noshocks():
# Test simulating values from the end of a time-varying VARMAX model
# with a trend
# In this test, we suppress randomness by setting the shocks to zeros
endog = np.arange(1, 21).reshape(10, 2)
mod = varmax.VARMAX(endog, trend='ct')
res = mod.filter([1., 1., 1., 1., 1, 1, 1., 1., 1., 0.5, 1.])
nsimulations = 10
measurement_shocks = np.zeros((nsimulations, mod.k_endog))
state_shocks = np.zeros((nsimulations, mod.k_states))
# Need to set the final predicted state given the new trend
with res._set_final_predicted_state(exog=None, out_of_sample=10):
initial_state = res.predicted_state[:, -1].copy()
# Simulation
actual = res.simulate(nsimulations, anchor='end',
measurement_shocks=measurement_shocks,
state_shocks=state_shocks,
initial_state=initial_state)
desired = np.zeros((nsimulations, mod.k_endog))
desired[0] = initial_state
tmp_trend = 1 + np.arange(11, 21)
for i in range(1, nsimulations):
desired[i] = desired[i - 1].sum() + tmp_trend[i] + state_shocks[i - 1]
desired = desired + measurement_shocks
assert_allclose(actual, desired)
# Test using the model versus the results class
mod_actual = mod.simulate(
res.params, nsimulations, anchor='end',
measurement_shocks=measurement_shocks,
state_shocks=state_shocks,
initial_state=initial_state)
assert_allclose(mod_actual, desired)
# Alternatively, since we've shut down the shocks, we can compare against
# the forecast values
assert_allclose(actual, res.forecast(nsimulations))
def test_varmax_end_time_varying_trend_shocks(reset_randomstate):
# Test simulating values from the end of a time-varying VARMAX model
# with a trend
endog = np.arange(1, 21).reshape(10, 2)
mod = varmax.VARMAX(endog, trend='ct')
res = mod.filter([1., 1., 1., 1., 1, 1, 1., 1., 1., 0.5, 1.])
nsimulations = 10
measurement_shocks = np.random.normal(size=(nsimulations, mod.k_endog))
state_shocks = np.random.normal(size=(nsimulations, mod.k_states))
# Need to set the final predicted state given the new trend
with res._set_final_predicted_state(exog=None, out_of_sample=10):
initial_state = res.predicted_state[:, -1].copy()
# Simulation
actual = res.simulate(nsimulations, anchor='end',
measurement_shocks=measurement_shocks,
state_shocks=state_shocks,
initial_state=initial_state)
desired = np.zeros((nsimulations, mod.k_endog))
desired[0] = initial_state
tmp_trend = 1 + np.arange(11, 21)
for i in range(1, nsimulations):
desired[i] = desired[i - 1].sum() + tmp_trend[i] + state_shocks[i - 1]
desired = desired + measurement_shocks
assert_allclose(actual, desired)
# Test using the model versus the results class
mod_actual = mod.simulate(
res.params, nsimulations, anchor='end',
measurement_shocks=measurement_shocks,
state_shocks=state_shocks,
initial_state=initial_state)
assert_allclose(mod_actual, desired)
def test_varmax_end_time_varying_exog_noshocks():
# Test simulating values from the end of a time-varying VARMAX model
# with exog
# In this test, we suppress randomness by setting the shocks to zeros
endog = np.arange(1, 21).reshape(10, 2)
exog = np.arange(1, 21)**2
mod = varmax.VARMAX(endog, trend='n', exog=exog[:10])
res = mod.filter([1., 1., 1., 1., 1., 1., 1., 0.5, 1.])
nsimulations = 10
measurement_shocks = np.zeros((nsimulations, mod.k_endog))
state_shocks = np.zeros((nsimulations, mod.k_states))
# Need to set the final predicted state given the new exog
tmp_exog = mod._validate_out_of_sample_exog(exog[10:], out_of_sample=10)
with res._set_final_predicted_state(exog=tmp_exog, out_of_sample=10):
initial_state = res.predicted_state[:, -1].copy()
# Simulation
actual = res.simulate(nsimulations, exog=exog[10:], anchor='end',
measurement_shocks=measurement_shocks,
state_shocks=state_shocks,
initial_state=initial_state)
desired = np.zeros((nsimulations, mod.k_endog))
desired[0] = initial_state
for i in range(1, nsimulations):
desired[i] = desired[i - 1].sum() + exog[10 + i] + state_shocks[i - 1]
desired = desired + measurement_shocks
assert_allclose(actual, desired)
# Test using the model versus the results class
mod_actual = mod.simulate(
res.params, nsimulations, exog=exog[10:], anchor='end',
measurement_shocks=measurement_shocks,
state_shocks=state_shocks,
initial_state=initial_state)
assert_allclose(mod_actual, desired)
# Alternatively, since we've shut down the shocks, we can compare against
# the forecast values
assert_allclose(actual, res.forecast(nsimulations, exog=exog[10:]))
def test_varmax_end_time_varying_exog_shocks(reset_randomstate):
# Test simulating values from the end of a time-varying VARMAX model
# with exog
endog = np.arange(1, 23).reshape(11, 2)
exog = np.arange(1, 21)**2
mod = varmax.VARMAX(endog[:10], trend='n', exog=exog[:10])
res = mod.filter([1., 1., 1., 1., 1., 1., 1., 0.5, 1.])
mod2 = varmax.VARMAX(endog, trend='n', exog=exog[:11])
res2 = mod2.filter([1., 1., 1., 1., 1., 1., 1., 0.5, 1.])
nsimulations = 10
measurement_shocks = np.random.normal(size=(nsimulations, mod.k_endog))
state_shocks = np.random.normal(size=(nsimulations, mod.k_states))
# Need to set the final predicted state given the new exog
tmp_exog = mod._validate_out_of_sample_exog(exog[10:], out_of_sample=10)
with res._set_final_predicted_state(exog=tmp_exog, out_of_sample=10):
initial_state = res.predicted_state[:, -1].copy()
# Simulation
actual = res.simulate(nsimulations, exog=exog[10:], anchor='end',
measurement_shocks=measurement_shocks,
state_shocks=state_shocks,
initial_state=initial_state)
actual2 = res2.simulate(nsimulations, exog=exog[11:], anchor=-1,
measurement_shocks=measurement_shocks,
state_shocks=state_shocks,
initial_state=res2.predicted_state[:, -2])
desired = np.zeros((nsimulations, mod.k_endog))
desired[0] = initial_state
for i in range(1, nsimulations):
desired[i] = desired[i - 1].sum() + exog[10 + i] + state_shocks[i - 1]
desired = desired + measurement_shocks
assert_allclose(actual, desired)
assert_allclose(actual2, desired)
# Test using the model versus the results class
mod_actual = mod.simulate(
res.params, nsimulations, exog=exog[10:], anchor='end',
measurement_shocks=measurement_shocks,
state_shocks=state_shocks,
initial_state=initial_state)
assert_allclose(mod_actual, desired)
def test_dynamic_factor_end_time_invariant_noshocks():
# Test simulating values from the end of a time-invariant dynamic factor
# In this test, we suppress randomness by setting the shocks to zeros
endog = np.arange(1, 21).reshape(10, 2)
mod = dynamic_factor.DynamicFactor(endog, k_factors=1, factor_order=1)
mod.ssm.filter_univariate = True
res = mod.filter([1., 1., 1., 1., 1.])
nsimulations = 10
measurement_shocks = np.zeros((nsimulations, mod.k_endog))
state_shocks = np.zeros((nsimulations, mod.k_states))
initial_state = res.predicted_state[..., -1]
# Simulation
actual = res.simulate(nsimulations, anchor='end',
measurement_shocks=measurement_shocks,
state_shocks=state_shocks,
initial_state=initial_state)
# Construct the simulation directly
desired = np.zeros((nsimulations, mod.k_endog))
desired[0] = initial_state
for i in range(1, nsimulations):
desired[i] = desired[i - 1] + state_shocks[i - 1]
desired = desired + measurement_shocks
assert_allclose(actual, desired)
# Test using the model versus the results class
mod_actual = mod.simulate(
res.params, nsimulations, anchor='end',
measurement_shocks=measurement_shocks,
state_shocks=state_shocks,
initial_state=initial_state)
assert_allclose(mod_actual, desired)
# Alternatively, since we've shut down the shocks, we can compare against
# the forecast values
assert_allclose(actual, res.forecast(nsimulations))
def test_dynamic_factor_end_time_invariant_shocks(reset_randomstate):
# Test simulating values from the end of a time-invariant dynamic factor
endog = np.arange(1, 21).reshape(10, 2)
mod = dynamic_factor.DynamicFactor(endog, k_factors=1, factor_order=1)
mod.ssm.filter_univariate = True
res = mod.filter([1., 1., 1., 1., 1., 1., 1.])
nsimulations = 10
measurement_shocks = np.random.normal(size=(nsimulations, mod.k_endog))
state_shocks = np.random.normal(size=(nsimulations, mod.k_states))
initial_state = res.predicted_state[..., -1]
# Simulation
actual = res.simulate(nsimulations, anchor='end',
measurement_shocks=measurement_shocks,
state_shocks=state_shocks,
initial_state=initial_state)
# Construct the simulation directly
desired = np.zeros((nsimulations, mod.k_endog))
desired[0] = initial_state
for i in range(1, nsimulations):
desired[i] = desired[i - 1] + state_shocks[i - 1]
desired = desired + measurement_shocks
assert_allclose(actual, desired)
# Test using the model versus the results class
mod_actual = mod.simulate(
res.params, nsimulations, anchor='end',
measurement_shocks=measurement_shocks,
state_shocks=state_shocks,
initial_state=initial_state)
assert_allclose(mod_actual, desired)
def test_dynamic_factor_end_time_varying_exog_noshocks():
# Test simulating values from the end of a time-varying dynamic factor
# model with exogenous inputs
# In this test, we suppress randomness by setting the shocks to zeros
endog = np.arange(1, 21).reshape(10, 2)
exog = np.arange(1, 21)**2
mod = dynamic_factor.DynamicFactor(endog, k_factors=1, factor_order=1,
exog=exog[:10])
mod.ssm.filter_univariate = True
res = mod.filter([1., 1., 1., 1., 1., 1., 1.])
nsimulations = 10
measurement_shocks = np.zeros((nsimulations, mod.k_endog))
state_shocks = np.zeros((nsimulations, mod.k_states))
initial_state = res.predicted_state[..., -1]
# Simulation
actual = res.simulate(nsimulations, exog=exog[10:], anchor='end',
measurement_shocks=measurement_shocks,
state_shocks=state_shocks,
initial_state=initial_state)
# Construct the simulation directly
desired = np.zeros((nsimulations, mod.k_endog))
desired[0] = initial_state
for i in range(1, nsimulations):
desired[i] = desired[i - 1] + state_shocks[i - 1]
desired = desired + measurement_shocks + exog[10:, None]
assert_allclose(actual, desired)
# Test using the model versus the results class
mod_actual = mod.simulate(
res.params, nsimulations, exog=exog[10:], anchor='end',
measurement_shocks=measurement_shocks,
state_shocks=state_shocks,
initial_state=initial_state)
assert_allclose(mod_actual, desired)
# Alternatively, since we've shut down the shocks, we can compare against
# the forecast values
assert_allclose(actual, res.forecast(nsimulations, exog=exog[10:]))
def test_dynamic_factor_end_time_varying_exog_shocks(reset_randomstate):
# Test simulating values from the end of a time-varying dynamic factor
# model with exogenous inputs
endog = np.arange(1, 23).reshape(11, 2)
exog = np.arange(1, 21)**2
mod = dynamic_factor.DynamicFactor(
endog[:10], k_factors=1, factor_order=1, exog=exog[:10])
mod.ssm.filter_univariate = True
res = mod.filter([1., 1., 1., 1., 1., 1., 1.])
mod2 = dynamic_factor.DynamicFactor(
endog, k_factors=1, factor_order=1, exog=exog[:11])
mod2.ssm.filter_univariate = True
res2 = mod2.filter([1., 1., 1., 1., 1., 1., 1.])
nsimulations = 10
measurement_shocks = np.random.normal(size=(nsimulations, mod.k_endog))
state_shocks = np.random.normal(size=(nsimulations, mod.k_states))
initial_state = res.predicted_state[..., -1]
# Simulations
actual = res.simulate(nsimulations, exog=exog[10:], anchor='end',
measurement_shocks=measurement_shocks,
state_shocks=state_shocks,
initial_state=initial_state)
actual2 = res2.simulate(nsimulations, exog=exog[11:], anchor=-1,
measurement_shocks=measurement_shocks,
state_shocks=state_shocks,
initial_state=initial_state)
# Construct the simulation directly
desired = np.zeros((nsimulations, mod.k_endog))
desired[0] = initial_state
for i in range(1, nsimulations):
desired[i] = desired[i - 1] + state_shocks[i - 1]
desired = desired + measurement_shocks + exog[10:, None]
assert_allclose(actual, desired)
assert_allclose(actual2, desired)
# Test using the model versus the results class
mod_actual = mod.simulate(
res.params, nsimulations, exog=exog[10:], anchor='end',
measurement_shocks=measurement_shocks,
state_shocks=state_shocks,
initial_state=initial_state)
assert_allclose(mod_actual, desired)
def test_pandas_univariate_rangeindex():
# Simulate will also have RangeIndex
endog = pd.Series(np.zeros(2))
mod = sarimax.SARIMAX(endog)
res = mod.filter([0.5, 1.])
# Default simulate anchors to the start of the sample
actual = res.simulate(2, state_shocks=np.zeros(2),
initial_state=np.zeros(1))
desired = pd.Series([0, 0])
assert_allclose(actual, desired)
# Alternative anchor changes the index
actual = res.simulate(2, anchor=2, state_shocks=np.zeros(2),
initial_state=np.zeros(1))
ix = pd.RangeIndex(2, 4)
desired = pd.Series([0, 0], index=ix)
assert_allclose(actual, desired)
assert_(actual.index.equals(desired.index))
def test_pandas_univariate_rangeindex_repetitions():
# Simulate will also have RangeIndex
endog = pd.Series(np.zeros(2))
mod = sarimax.SARIMAX(endog)
res = mod.filter([0.5, 1.])
# Default simulate anchors to the start of the sample
actual = res.simulate(2, state_shocks=np.zeros(2),
initial_state=np.zeros(1), repetitions=2)
columns = pd.MultiIndex.from_product([['y'], [0, 1]])
desired = pd.DataFrame(np.zeros((2, 2)), columns=columns)
assert_allclose(actual, desired)
assert_(actual.columns.equals(desired.columns))
# Alternative anchor changes the index
actual = res.simulate(2, anchor=2, state_shocks=np.zeros(2),
initial_state=np.zeros(1), repetitions=2)
ix = pd.RangeIndex(2, 4)
columns = pd.MultiIndex.from_product([['y'], [0, 1]])
desired = pd.DataFrame(np.zeros((2, 2)), index=ix, columns=columns)
assert_allclose(actual, desired)
assert_(actual.index.equals(desired.index))
assert_(actual.columns.equals(desired.columns))
def test_pandas_univariate_dateindex():
    # Simulation will maintain the date index
ix = pd.date_range(start='2000', periods=2, freq='M')
endog = pd.Series(np.zeros(2), index=ix)
mod = sarimax.SARIMAX(endog)
res = mod.filter([0.5, 1.])
# Default simulate anchors to the start of the sample
actual = res.simulate(2, state_shocks=np.zeros(2),
initial_state=np.zeros(1))
ix = pd.date_range(start='2000-01', periods=2, freq='M')
desired = pd.Series([0, 0], index=ix)
assert_allclose(actual, desired)
assert_(actual.index.equals(desired.index))
# Alternative anchor changes the index
actual = res.simulate(2, anchor=2, state_shocks=np.zeros(2),
initial_state=np.zeros(1))
ix = pd.date_range(start='2000-03', periods=2, freq='M')
desired = pd.Series([0, 0], index=ix)
assert_allclose(actual, desired)
def test_pandas_univariate_dateindex_repetitions():
    # Simulation will maintain the date index
ix = pd.date_range(start='2000', periods=2, freq='M')
endog = pd.Series(np.zeros(2), index=ix)
mod = sarimax.SARIMAX(endog)
res = mod.filter([0.5, 1.])
# Default simulate anchors to the start of the sample
actual = res.simulate(2, state_shocks=np.zeros(2),
initial_state=np.zeros(1), repetitions=2)
ix = pd.date_range(start='2000-01', periods=2, freq='M')
columns = pd.MultiIndex.from_product([['y'], [0, 1]])
desired = pd.DataFrame(np.zeros((2, 2)), index=ix, columns=columns)
assert_allclose(actual, desired)
assert_(actual.columns.equals(desired.columns))
# Alternative anchor changes the index
actual = res.simulate(2, anchor=2, state_shocks=np.zeros(2),
initial_state=np.zeros(1), repetitions=2)
ix = pd.date_range(start='2000-03', periods=2, freq='M')
columns = pd.MultiIndex.from_product([['y'], [0, 1]])
desired = pd.DataFrame(np.zeros((2, 2)), index=ix, columns=columns)
assert_allclose(actual, desired)
assert_(actual.index.equals(desired.index))
assert_(actual.columns.equals(desired.columns))
def test_pandas_multivariate_rangeindex():
# Simulate will also have RangeIndex
endog = pd.DataFrame(np.zeros((2, 2)))
mod = varmax.VARMAX(endog, trend='n')
res = mod.filter([0.5, 0., 0., 0.2, 1., 0., 1.])
# Default simulate anchors to the start of the sample
actual = res.simulate(2, state_shocks=np.zeros((2, 2)),
initial_state=np.zeros(2))
desired = pd.DataFrame(np.zeros((2, 2)))
assert_allclose(actual, desired)
# Alternative anchor changes the index
actual = res.simulate(2, anchor=2, state_shocks=np.zeros((2, 2)),
initial_state=np.zeros(2))
ix = pd.RangeIndex(2, 4)
desired = pd.DataFrame(np.zeros((2, 2)), index=ix)
assert_allclose(actual, desired)
assert_(actual.index.equals(desired.index))
def test_pandas_multivariate_rangeindex_repetitions():
# Simulate will also have RangeIndex
endog = pd.DataFrame(np.zeros((2, 2)), columns=['y1', 'y2'])
mod = varmax.VARMAX(endog, trend='n')
res = mod.filter([0.5, 0., 0., 0.2, 1., 0., 1.])
# Default simulate anchors to the start of the sample
actual = res.simulate(2, state_shocks=np.zeros((2, 2)),
initial_state=np.zeros(2), repetitions=2)
columns = pd.MultiIndex.from_product([['y1', 'y2'], [0, 1]])
desired = pd.DataFrame(np.zeros((2, 4)), columns=columns)
assert_allclose(actual, desired)
assert_(actual.columns.equals(desired.columns))
# Alternative anchor changes the index
actual = res.simulate(2, anchor=2, state_shocks=np.zeros((2, 2)),
initial_state=np.zeros(2), repetitions=2)
ix = pd.RangeIndex(2, 4)
columns = pd.MultiIndex.from_product([['y1', 'y2'], [0, 1]])
desired = pd.DataFrame(np.zeros((2, 4)), index=ix, columns=columns)
assert_allclose(actual, desired)
assert_(actual.index.equals(desired.index))
assert_(actual.columns.equals(desired.columns))
def test_pandas_multivariate_dateindex():
    # Simulation will maintain the date index
ix = pd.date_range(start='2000', periods=2, freq='M')
endog = pd.DataFrame(np.zeros((2, 2)), index=ix)
mod = varmax.VARMAX(endog, trend='n')
res = mod.filter([0.5, 0., 0., 0.2, 1., 0., 1.])
# Default simulate anchors to the start of the sample
actual = res.simulate(2, state_shocks=np.zeros((2, 2)),
initial_state=np.zeros(2))
desired = pd.DataFrame(np.zeros((2, 2)), index=ix)
assert_allclose(actual, desired)
# Alternative anchor changes the index
actual = res.simulate(2, anchor=2, state_shocks=np.zeros((2, 2)),
initial_state=np.zeros(2))
ix = pd.date_range(start='2000-03', periods=2, freq='M')
desired = pd.DataFrame(np.zeros((2, 2)), index=ix)
assert_allclose(actual, desired)
assert_(actual.index.equals(desired.index))
def test_pandas_multivariate_dateindex_repetitions():
    # Simulation will also have a date index
ix = pd.date_range(start='2000', periods=2, freq='M')
endog = pd.DataFrame(np.zeros((2, 2)), columns=['y1', 'y2'], index=ix)
mod = varmax.VARMAX(endog, trend='n')
res = mod.filter([0.5, 0., 0., 0.2, 1., 0., 1.])
# Default simulate anchors to the start of the sample
actual = res.simulate(2, state_shocks=np.zeros((2, 2)),
initial_state=np.zeros(2), repetitions=2)
columns = pd.MultiIndex.from_product([['y1', 'y2'], [0, 1]])
desired = pd.DataFrame(np.zeros((2, 4)), columns=columns, index=ix)
assert_allclose(actual, desired)
assert_(actual.columns.equals(desired.columns))
# Alternative anchor changes the index
actual = res.simulate(2, anchor=2, state_shocks=np.zeros((2, 2)),
initial_state=np.zeros(2), repetitions=2)
ix = pd.date_range(start='2000-03', periods=2, freq='M')
columns = pd.MultiIndex.from_product([['y1', 'y2'], [0, 1]])
desired = pd.DataFrame(np.zeros((2, 4)), index=ix, columns=columns)
assert_allclose(actual, desired)
assert_(actual.index.equals(desired.index))
assert_(actual.columns.equals(desired.columns))
def test_pandas_anchor():
# Test that anchor with dates works
    ix = pd.date_range(start='2000', periods=2, freq='M')
import os
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Union, Optional, List, Dict
from tqdm import tqdm
from .basic_predictor import BasicPredictor
from .utils import inverse_preprocess_data
from common_utils_dev import to_parquet, to_abs_path
COMMON_CONFIG = {
"data_dir": to_abs_path(__file__, "../../../storage/dataset/dataset/v001/train"),
"exp_dir": to_abs_path(__file__, "../../../storage/experiments/v001"),
"test_data_dir": to_abs_path(
__file__, "../../../storage/dataset/dataset/v001/test"
),
}
DATA_CONFIG = {
"checkpoint_dir": "./check_point",
"generate_output_dir": "./generated_output",
"base_feature_assets": ["BTC-USDT"],
}
MODEL_CONFIG = {
"lookback_window": 120,
"batch_size": 512,
"lr": 0.0001,
"epochs": 10,
"print_epoch": 1,
"print_iter": 50,
"save_epoch": 1,
"criterion": "l2",
"criterion_params": {},
"load_strict": False,
"model_name": "BackboneV1",
"model_params": {
"in_channels": 86,
"n_blocks": 5,
"n_block_layers": 10,
"growth_rate": 12,
"dropout": 0.1,
"channel_reduction": 0.5,
"activation": "tanhexp",
"normalization": "bn",
"seblock": True,
"sablock": True,
},
}
class PredictorV1(BasicPredictor):
"""
Functions:
train(): train the model with train_data
generate(save_dir: str): generate predictions & labels with test_data
        predict(X: torch.Tensor): generate prediction with given data
"""
def __init__(
self,
data_dir=COMMON_CONFIG["data_dir"],
test_data_dir=COMMON_CONFIG["test_data_dir"],
d_config={},
m_config={},
exp_dir=COMMON_CONFIG["exp_dir"],
device="cuda",
pin_memory=False,
num_workers=8,
mode="train",
default_d_config=DATA_CONFIG,
default_m_config=MODEL_CONFIG,
):
super().__init__(
data_dir=data_dir,
test_data_dir=test_data_dir,
d_config=d_config,
m_config=m_config,
exp_dir=exp_dir,
device=device,
pin_memory=pin_memory,
num_workers=num_workers,
mode=mode,
default_d_config=default_d_config,
default_m_config=default_m_config,
)
def _invert_to_prediction(self, pred_abs_factor, pred_sign_factor):
multiply = ((pred_sign_factor >= 0.5) * 1.0) + ((pred_sign_factor < 0.5) * -1.0)
return pred_abs_factor * multiply
def _compute_train_loss(self, train_data_dict):
# Set train mode
self.model.train()
self.model.zero_grad()
# Set loss
pred_abs_factor, pred_sign_factor = self.model(
x=train_data_dict["X"], id=train_data_dict["ID"]
)
# Y loss
loss = self.criterion(pred_abs_factor, train_data_dict["Y"].view(-1).abs()) * 10
loss += self.binary_criterion(
pred_sign_factor, (train_data_dict["Y"].view(-1) >= 0) * 1.0
)
return (
loss,
self._invert_to_prediction(
pred_abs_factor=pred_abs_factor, pred_sign_factor=pred_sign_factor
),
)
def _compute_test_loss(self, test_data_dict):
# Set eval mode
self.model.eval()
# Set loss
pred_abs_factor, pred_sign_factor = self.model(
x=test_data_dict["X"], id=test_data_dict["ID"]
)
# Y loss
loss = self.criterion(pred_abs_factor, test_data_dict["Y"].view(-1).abs()) * 10
loss += self.binary_criterion(
pred_sign_factor, (test_data_dict["Y"].view(-1) >= 0) * 1.0
)
return (
loss,
self._invert_to_prediction(
pred_abs_factor=pred_abs_factor, pred_sign_factor=pred_sign_factor
),
)
def _step(self, train_data_dict):
loss, _ = self._compute_train_loss(train_data_dict=train_data_dict)
loss.backward()
self.optimizer.step()
return loss
def _display_info(self, train_loss, test_loss, test_predictions, test_labels):
pred_norm = test_predictions[test_predictions >= 0].abs().mean()
label_norm = test_labels[test_labels >= 0].abs().mean()
# Print loss info
print(
f""" [+] train_loss: {train_loss:.2f}, test_loss: {test_loss:.2f} | [+] pred_norm: {pred_norm:.2f}, label_norm: {label_norm:.2f}"""
)
def _build_abs_bins(self, df):
abs_bins = {}
for column in df.columns:
_, abs_bins[column] = pd.qcut(
df[column].abs(), 10, labels=False, retbins=True
)
abs_bins[column] = np.concatenate([[0], abs_bins[column][1:-1], [np.inf]])
        return pd.DataFrame(abs_bins)
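# Minimal usage sketch (illustrative; relies on the default config values above and a prepared dataset/experiment folder):
#   predictor = PredictorV1(mode="train", device="cuda")
#   predictor.train()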
"""
oil price data source: https://www.ppac.gov.in/WriteReadData/userfiles/file/PP_9_a_DailyPriceMSHSD_Metro.pdf
"""
import pandas as pd
import numpy as np
import tabula
import requests
import plotly.express as px
import plotly.graph_objects as go
import time
from pandas.tseries.offsets import MonthEnd
import re
import xmltodict
def process_table(table_df):
print("processing the downloaded PDF from PPAC website.")
cols = ['Date', 'Delhi_Petrol', 'Mumbai_Petrol', 'Chennai_Petrol', 'Kolkata_Petrol',
'Date_D', 'Delhi_Diesel', 'Mumbai_Diesel','Chennai_Diesel', 'Kolkata_Diesel']
table_df.columns = cols
table_df.drop(table_df.index[[0,3]],inplace=True)
table_df.drop('Date_D',axis=1,inplace=True)
table_df.dropna(how='any',inplace=True)
table_df = table_df.astype(str)
table_df = table_df.apply(lambda x: x.str.replace(" ", ""))
table_df[['Delhi_Petrol', 'Mumbai_Petrol', 'Chennai_Petrol', 'Kolkata_Petrol', 'Delhi_Diesel', 'Mumbai_Diesel','Chennai_Diesel', 'Kolkata_Diesel']] = table_df[['Delhi_Petrol', 'Mumbai_Petrol', 'Chennai_Petrol', 'Kolkata_Petrol', 'Delhi_Diesel', 'Mumbai_Diesel','Chennai_Diesel', 'Kolkata_Diesel']].astype(float)
table_df['Date'] = pd.to_datetime(table_df['Date'])
table_petrol = table_df[['Date','Delhi_Petrol', 'Mumbai_Petrol', 'Chennai_Petrol','Kolkata_Petrol']]
table_diesel = table_df[['Date','Delhi_Diesel', 'Mumbai_Diesel','Chennai_Diesel', 'Kolkata_Diesel']]
new_cols = [i.replace("_Petrol", "") for i in list(table_petrol.columns)]
table_petrol.columns = new_cols
table_diesel.columns = new_cols
return table_petrol, table_diesel
def get_international_exchange_rates(start_date,end_date):
print("sending request for international exchange rates.")
exchange_dates_url = "https://api.exchangeratesapi.io/history?"
params = {"start_at": start_date, "end_at":end_date, "base":"USD", "symbols":"INR"}
try:
req = requests.get(exchange_dates_url,params=params)
except Exception as e:
print(e)
print("request failed. using the saved data.")
dollar_exchange_rates = pd.read_csv("dollar_exhange_rates.csv")
        dollar_exchange_rates['Date'] = pd.to_datetime(dollar_exchange_rates['Date'])
        dollar_exchange_rates = dollar_exchange_rates.set_index('Date').sort_index(ascending=False)
return dollar_exchange_rates
else:
print("request successful. processing the data.")
dollar_exchange_rates = pd.DataFrame(req.json()['rates']).T.reset_index()
dollar_exchange_rates['index'] = pd.to_datetime(dollar_exchange_rates['index'])
        dollar_exchange_rates = dollar_exchange_rates.set_index('index').sort_index(ascending=False)
dollar_exchange_rates.to_csv("dollar_exhange_rates.csv")
return dollar_exchange_rates
# def merge_data(dollar_exchange_rates, international_oil_prices, oil_price_data):
# print("merging the international oil price data, international exchange rate data and domestic oil price data.")
# trim_int = international_oil_prices.loc[international_oil_prices.index.isin(oil_price_data.index)].dropna()
# oil_price_data = oil_price_data.merge(trim_int, left_index=True, right_index=True).sort_index(ascending=False)
# oil_price_data = oil_price_data.merge(dollar_exchange_rates, left_index=True, right_index=True).sort_index(ascending=False)
# oil_price_data['INR'] = oil_price_data['INR'].round(2)
# oil_price_data['INR_pc'] = (((oil_price_data['INR'] - oil_price_data['INR'].iloc[-1])/oil_price_data['INR'].iloc[-1])*100).round(2)
# oil_price_data['rup_lit_crude'] = (oil_price_data['Price'] / 159) * oil_price_data['INR']
# oil_price_data['int_pc'] = (((oil_price_data['Price'] - oil_price_data['Price'].iloc[-1])/oil_price_data['Price'].iloc[-1])*100).round(2)
# oil_price_data['rup_lit_crude_pc'] = (((oil_price_data['rup_lit_crude'] - oil_price_data['rup_lit_crude'].iloc[-1])/oil_price_data['rup_lit_crude'].iloc[-1])*100).round(2)
# return oil_price_data
def download_ppac():
print("sending request for domestic oil price data from PPAC website.")
ppac_url = r"https://www.ppac.gov.in/WriteReadData/userfiles/file/PP_9_a_DailyPriceMSHSD_Metro.pdf"
try:
req = requests.get(ppac_url)
except Exception as e:
print(e)
print("Request unsuccessful. The saved file will be used.")
else:
with open('DATA/price_data.pdf', 'wb') as file:
file.write(req.content)
print('file saved successfully.')
def prepare_downloaded_file():
print("preparing downloaded file for analysis.")
oil_prices = 'DATA/price_data.pdf'
tables = tabula.read_pdf(oil_prices, pages="all")
proc_dfs = [process_table(i) for i in tables]
petrol_df = pd.concat(i[0] for i in proc_dfs)
diesel_df = pd.concat(i[1] for i in proc_dfs)
print(f"Success. Length of Petrol prices {len(petrol_df)}------ diesel prices {len(diesel_df)}")
petrol_df['mean_price'] = (petrol_df['Delhi']+petrol_df['Mumbai']+petrol_df['Chennai']+petrol_df['Kolkata'])/4
diesel_df['mean_price'] = (diesel_df['Delhi']+diesel_df['Mumbai']+diesel_df['Chennai']+diesel_df['Kolkata'])/4
print("Adding percent change columns")
for i in petrol_df.columns[1:]:
petrol_df[f'{i}_pc'] = (((petrol_df[i] - petrol_df[i].iloc[-1])/petrol_df[i].iloc[-1]) * 100).round(2)
for i in diesel_df.columns[1:]:
diesel_df[f'{i}_pc'] = (((diesel_df[i] - diesel_df[i].iloc[-1])/diesel_df[i].iloc[-1]) * 100).round(2)
petrol_df.set_index("Date",inplace=True)
diesel_df.set_index("Date",inplace=True)
return petrol_df, diesel_df
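# Typical call sequence (illustrative; assumes the PPAC PDF can be downloaded or is already saved locally):
#   download_ppac()
#   petrol_df, diesel_df = prepare_downloaded_file()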
def prep_consumption_df(consumption_df,year):
consumption_df.reset_index(inplace=True)
consumption_df.dropna(how='any',inplace=True)
consumption_df.drop('index',axis=1,inplace=True)
#print(consumption_df)
cols = ['products', 'April','May','June','July','August','September','October','November','December','January','February','March','Total']
consumption_df.drop(consumption_df.index[0],inplace=True)
consumption_df.columns = cols
consumption_df = consumption_df.loc[(consumption_df['products']=='MS')|(consumption_df['products']=='HSD')].reset_index().drop(['index','Total'],axis=1)
melt_df = pd.melt(consumption_df, id_vars = 'products',var_name='month',value_name='average_cons')
melt_df.sort_values('products',inplace=True)
melt_df = melt_df.reset_index().drop('index',axis=1)
melt_df['year'] = year
melt_df['year'] = melt_df['year'].apply(lambda x: x.split('-')[0]).astype(int)
melt_df['year'] = np.where((melt_df['month'].isin(['January','February','March'])),melt_df['year']+1,melt_df['year'])
melt_df['average_cons'] = melt_df['average_cons'].astype(float).round(2)
return melt_df
def prep_consumption_df_present(consumption_df,year):
    consumption_df = consumption_df.reset_index().drop('index', axis=1)
consumption_df.drop(consumption_df.index[range(0,6)],inplace=True)
consumption_df.reset_index(inplace=True)
consumption_df.drop('index',axis=1,inplace=True)
print(consumption_df)
consumption_df.drop(consumption_df.index[range(14,20)],inplace=True)
consumption_df.reset_index(inplace=True)
consumption_df.drop('index',axis=1,inplace=True)
#print(consumption_df)
cols = ['products', 'April','May','June','July','August','September','October','November','December','January','February','March','Total']
consumption_df.drop(consumption_df.index[0],inplace=True)
consumption_df.columns = cols
consumption_df = consumption_df.loc[(consumption_df['products']=='MS')|(consumption_df['products']=='HSD')].reset_index().drop(['index','Total'],axis=1)
    melt_df = pd.melt(consumption_df, id_vars = 'products',var_name='month',value_name='average_cons')
#!/usr/bin/env python
import click
import numpy as np
import os
import pandas as pd
import re
import torch
from tqdm import tqdm
bar_format = "{percentage:3.0f}%|{bar:20}{r_bar}"
# Local imports
from architectures import PWM, get_metrics
from train import _get_seqs_labels_ids, _get_data_loader
from utils import get_file_handle
CONTEXT_SETTINGS = {
"help_option_names": ["-h", "--help"],
}
@click.command(no_args_is_help=True, context_settings=CONTEXT_SETTINGS)
@click.argument(
"meme_file",
type=click.Path(exists=True, resolve_path=True),
)
@click.argument(
"tsv_file",
type=click.Path(exists=True, resolve_path=True),
)
@click.option(
"-b", "--batch-size",
help="Batch size.",
type=int,
default=100,
show_default=True,
)
@click.option(
"-d", "--debugging",
help="Debugging mode.",
is_flag=True,
)
@click.option(
"-o", "--output-dir",
help="Output directory.",
type=click.Path(resolve_path=True),
default="./",
show_default=True,
)
@click.option(
"-p", "--prefix",
help="Output prefix.",
)
@click.option(
"-s", "--scoring",
help="Scoring function.",
type=click.Choice(["max", "sum"]),
default="max",
show_default=True,
)
def main(**args):
# Create output dir
if not os.path.exists(args["output_dir"]):
os.makedirs(args["output_dir"])
##############
# Load Data #
##############
# Initialize
device = "cuda" if torch.cuda.is_available() else "cpu"
# Get data
seqs, y_true, _ = _get_seqs_labels_ids(args["tsv_file"], args["debugging"])
# Get DataLoader
data_loader = _get_data_loader(seqs, y_true, args["batch_size"])
# Load model
pwms, names = _get_PWMs(args["meme_file"], resize_pwms=True,
return_log=True)
pwm_model = PWM(pwms, seqs.shape[2], args["scoring"]).to(device)
##############
# Score PWMs #
##############
# Initialize
idx = 0
scores = np.zeros((len(data_loader.dataset), pwm_model._options["groups"]))
with torch.no_grad():
for x, _ in tqdm(iter(data_loader), total=len(data_loader),
bar_format=bar_format):
# Prepare inputs
x = x.to(device)
# Get scores
s = pwm_model(x)
scores[idx:idx+x.shape[0], :] = s.cpu().numpy()
# Index increase
idx += x.shape[0]
###############
# AUC metrics #
###############
# Initialize
aucs = []
metrics = get_metrics()
# Compute AUCs
for i in range(len(names)):
y_score = scores[:, i]
aucs.append([names[i]])
for m in metrics:
aucs[-1].append(metrics[m](y_true, y_score))
###############
# Output AUCs #
###############
# Create DataFrame
    df = pd.DataFrame(aucs, columns=["PWM"]+[m for m in metrics])
#! python3
#import os
#os.environ["R_HOME"] = r""
#os.environ["path"] = r"C:\Users\localadmin\Anaconda3;C:\Users\localadmin\Anaconda3\Scripts;C:\Users\localadmin\Anaconda3\Library\bin;C:\Users\localadmin\Anaconda3\Library\mingw-w64\lib;C:\Users\localadmin\Anaconda3\Library\mingw-w64\bin;" + os.environ["path"]
import os
import settings
if settings.R_HOME:
os.environ["R_HOME"] = settings.R_HOME
import argparse
import msstats
import gostats
import pandas as pd
from io import StringIO
from get_uniprot import UniprotParser, UniprotSequence
import csv
parser = argparse.ArgumentParser(description="Automated workflow for processing PeakView data through MSstats and GOstats")
parser.add_argument("-i",
"-input_file",
type=str,
help="Filepath to experiment description file where each row has 5 columns, ion, fdr, out, treatment, "
"control", dest="i")
msstats_pvalue_cutoff = settings.msstats_cutoff
gostats_pvalue_cutoff = settings.gostats_cutoff
gostats_check = settings.gostats_check
def split_base(work):
work = pd.read_csv(work, sep="\t")
new_work = []
for i, r in work.iterrows():
r["out"] = r["out"].rstrip("/")
#print(r["out"])
#if ";" in r["control"] or ";" in r["treatment"]:
control = r["control"].split(";")
treatment = r["treatment"].split(";")
control_dict = {}
treatment_dict = {}
        ion = pd.read_csv(r["ion"])
# -*- coding: utf-8 -*-
import pytest
import numpy as np
from pandas.compat import range
import pandas as pd
import pandas.util.testing as tm
# -------------------------------------------------------------------
# Comparisons
class TestFrameComparisons(object):
def test_df_boolean_comparison_error(self):
# GH#4576
# boolean comparisons with a tuple/list give unexpected results
df = pd.DataFrame(np.arange(6).reshape((3, 2)))
# not shape compatible
with pytest.raises(ValueError):
df == (2, 2)
with pytest.raises(ValueError):
df == [2, 2]
def test_df_float_none_comparison(self):
df = pd.DataFrame(np.random.randn(8, 3), index=range(8),
columns=['A', 'B', 'C'])
with pytest.raises(TypeError):
df.__eq__(None)
def test_df_string_comparison(self):
df = pd.DataFrame([{"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}])
mask_a = df.a > 1
tm.assert_frame_equal(df[mask_a], df.loc[1:1, :])
tm.assert_frame_equal(df[-mask_a], df.loc[0:0, :])
mask_b = df.b == "foo"
tm.assert_frame_equal(df[mask_b], df.loc[0:0, :])
tm.assert_frame_equal(df[-mask_b], df.loc[1:1, :])
@pytest.mark.parametrize('opname', ['eq', 'ne', 'gt', 'lt', 'ge', 'le'])
def test_df_flex_cmp_constant_return_types(self, opname):
# GH#15077, non-empty DataFrame
df = pd.DataFrame({'x': [1, 2, 3], 'y': [1., 2., 3.]})
const = 2
result = getattr(df, opname)(const).get_dtype_counts()
tm.assert_series_equal(result, pd.Series([2], ['bool']))
@pytest.mark.parametrize('opname', ['eq', 'ne', 'gt', 'lt', 'ge', 'le'])
def test_df_flex_cmp_constant_return_types_empty(self, opname):
# GH#15077 empty DataFrame
df = pd.DataFrame({'x': [1, 2, 3], 'y': [1., 2., 3.]})
const = 2
empty = df.iloc[:0]
result = getattr(empty, opname)(const).get_dtype_counts()
tm.assert_series_equal(result, pd.Series([2], ['bool']))
@pytest.mark.parametrize('timestamps', [
[pd.Timestamp('2012-01-01 13:00:00+00:00')] * 2,
[pd.Timestamp('2012-01-01 13:00:00')] * 2])
def test_tz_aware_scalar_comparison(self, timestamps):
# Test for issue #15966
df = pd.DataFrame({'test': timestamps})
expected = pd.DataFrame({'test': [False, False]})
tm.assert_frame_equal(df == -1, expected)
# -------------------------------------------------------------------
# Arithmetic
class TestFrameFlexArithmetic(object):
def test_df_add_flex_filled_mixed_dtypes(self):
# GH#19611
dti = pd.date_range('2016-01-01', periods=3)
ser = pd.Series(['1 Day', 'NaT', '2 Days'], dtype='timedelta64[ns]')
df = pd.DataFrame({'A': dti, 'B': ser})
other = pd.DataFrame({'A': ser, 'B': ser})
fill = pd.Timedelta(days=1).to_timedelta64()
result = df.add(other, fill_value=fill)
expected = pd.DataFrame(
{'A': pd.Series(['2016-01-02', '2016-01-03', '2016-01-05'],
dtype='datetime64[ns]'),
'B': ser * 2})
tm.assert_frame_equal(result, expected)
class TestFrameMulDiv(object):
"""Tests for DataFrame multiplication and division"""
# ------------------------------------------------------------------
# Mod By Zero
def test_df_mod_zero_df(self):
# GH#3590, modulo as ints
df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
# this is technically wrong, as the integer portion is coerced to float
# ###
first = pd.Series([0, 0, 0, 0], dtype='float64')
second = pd.Series([np.nan, np.nan, np.nan, 0])
expected = pd.DataFrame({'first': first, 'second': second})
result = df % df
tm.assert_frame_equal(result, expected)
def test_df_mod_zero_array(self):
# GH#3590, modulo as ints
df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
# this is technically wrong, as the integer portion is coerced to float
# ###
first = pd.Series([0, 0, 0, 0], dtype='float64')
second = pd.Series([np.nan, np.nan, np.nan, 0])
expected = pd.DataFrame({'first': first, 'second': second})
# numpy has a slightly different (wrong) treatment
with np.errstate(all='ignore'):
arr = df.values % df.values
result2 = pd.DataFrame(arr, index=df.index,
columns=df.columns, dtype='float64')
result2.iloc[0:3, 1] = np.nan
tm.assert_frame_equal(result2, expected)
def test_df_mod_zero_int(self):
# GH#3590, modulo as ints
df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = df % 0
        expected = pd.DataFrame(np.nan, index=df.index, columns=df.columns)
#!/usr/bin/env python
# coding: utf-8
# -------------------------------------------------------------------
# **TD DSA 2021 by <NAME> - report by <NAME>**
# ------------------------- -------------------------------------
# # Descriptive analysis
# ## Setup
# In[5]:
get_ipython().system('pip install textblob')
# In[6]:
get_ipython().system('pip install emot')
# In[7]:
get_ipython().system('pip install wordcloud')
# In[8]:
#Time and files
import os
import warnings
import time
from datetime import timedelta
#Data handling
import pandas as pd
import numpy as np
# Text
from collections import Counter
import nltk
nltk.download('punkt')
from nltk.tokenize import word_tokenize
nltk.download('stopwords')
from nltk.corpus import stopwords
nltk.download('vader_lexicon')
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from nltk.stem import WordNetLemmatizer
nltk.download('wordnet')
from nltk.util import ngrams
from textblob import TextBlob
import string
import re
import spacy
from emot.emo_unicode import UNICODE_EMO, EMOTICONS
#Visualisation
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
import seaborn as sns
import plotly.express as px
import plotly.graph_objects as go
from wordcloud import WordCloud
#Experiment tracking
import mlflow
import mlflow.sklearn
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
# ### Using the package
# In[9]:
#This cell imports the packaged version of the project and ensures it is reloaded before its functions are called
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
# In[10]:
from dsa_sentiment.scripts.make_dataset import load_data
from dsa_sentiment.scripts.evaluate import eval_metrics
from dsa_sentiment.scripts.make_dataset import Preprocess_StrLower, Preprocess_transform_target
# ### Configuring the MLflow experiment
# In[11]:
mlflow.tracking.get_tracking_uri()
# ### Loading the data
# In[12]:
# Import the data
#df
df_train=pd.read_parquet('/mnt/data/interim/df_train.gzip')
df_val=pd.read_parquet('/mnt/data/interim/df_val.gzip')
df_test=pd.read_parquet('/mnt/data/interim/df_test.gzip')
#X
X_train=pd.read_parquet('/mnt/data/interim/X_train.gzip')
X_val=pd.read_parquet('/mnt/data/interim/X_val.gzip')
X_test=pd.read_parquet('/mnt/data/interim/X_test.gzip')
#y
y_train=pd.read_parquet('/mnt/data/interim/y_train.gzip')
y_val=pd.read_parquet('/mnt/data/interim/y_val.gzip')
y_test=pd.read_parquet('/mnt/data/interim/y_test.gzip')
# ## EDA
# We start by analysing the balance of the different sentiment classes
# In[13]:
df = df_train
df.head()
# ### Class balance of the training set by label
# In[14]:
fig = px.histogram(df, x="sentiment", color="sentiment", title = 'Nombre de tweets par sentiment')
fig.show()
# There is a slight class imbalance in favour of the `neutral` sentiment
# ### Lexical fields by label
# For the rest of the work, we create a corpus containing the concatenation of all tweets of a given tone.
# In[15]:
def create_corpus(text_series):
text = text_series.apply(lambda x : x.split())
text = sum(text, [])
return text
# In[16]:
positive_text = create_corpus(df['text'][df['sentiment']=='positive'])
negative_text = create_corpus(df['text'][df['sentiment']=='negative'])
neutral_text = create_corpus(df['text'][df['sentiment']=='neutral'])
# It then becomes possible to create histograms showing the frequency of N-grams in a given corpus
# In[17]:
def plot_freq_dist(text_corpus, nb=30, ngram=1, title=''):
'''
Plot the most common words
inputs:
text_corpus : a corpus of words
nb : number of words to plot
title : graph title
returns:
nothing, plots the graph
'''
freq_pos=Counter(ngrams(create_corpus(pd.Series(text_corpus)),ngram))
pos_df = pd.DataFrame({
"words":[' '.join(items) for items in list(freq_pos.keys())],
"Count":list(freq_pos.values())
})
    common_pos = pos_df.nlargest(columns="Count", n=nb)
fig = px.bar(common_pos, x="words", y="Count", labels={"words": "Words", "Count":"Frequency"}, title=title)
fig.show();
# In[18]:
plot_freq_dist(positive_text, title = 'Most common words associated with positive tweets')
# The result shows the preponderance of `stopwords`, those linking words which are very common and hinder the identification of keywords specific to a given document or set of documents.
#
# Text therefore needs to be preprocessed before analysis.
# ### Preprocessing
# Tweet-specific elements that may have an impact on the rest of the analysis include:
#
# - keywords marked with a `#`
# - usernames starting with a `@`
# - emoticons and emojis
# - the number of words in UPPERCASE
# - repeated characters used for emphasis `!!!!`, `looooong`, or self-censorship `f***`
# - typos (words shorter than 2 characters)
# To keep the processing homogeneous, reproducible and configurable, a dedicated function is created. Its parameters can be tested in the later modelling phases.
# source [preprocess](https://www.kaggle.com/stoicstatic/twitter-sentiment-analysis-for-beginners)
# In[57]:
def preprocess_text(text_series,
apply_lemmatizer=True,
apply_lowercase=True,
apply_url_standerdisation=True,
apply_user_standerdisation=True,
apply_emoticon_to_words=True,
apply_stopwords_removal=True,
apply_shortwords_removal=True,
apply_non_alphabetical_removal=True,
apply_only_2_consecutive_charac=True
):
'''
Main preprocess function
inputs:
text_series : a pandas Series object with text to preprocess
outputs:
a preprocessed pandas Series object
'''
processedText = []
if apply_lemmatizer:
# Create Lemmatizer and Stemmer.
wordLemm = WordNetLemmatizer()
# Defining regex patterns.
urlPattern = r"((http://)[^ ]*|(https://)[^ ]*|( www\.)[^ ]*)"
userPattern = '@[^\s]+'
alphaPattern = r"[^(\w|\*|(!){2}|#)]"
sequencePattern = r"(.)\1\1+"
seqReplacePattern = r"\1\1"
for tweet in text_series:
if apply_lowercase:
tweet = tweet.lower()
if apply_url_standerdisation:
# Replace all URls with 'URL'
tweet = re.sub(urlPattern,' URL',tweet)
if apply_user_standerdisation:
# Replace @USERNAME to 'USER'.
tweet = re.sub(userPattern,' USER', tweet)
if apply_emoticon_to_words:
# Replace all emojis.
for emo in EMOTICONS:
#refactor outputs so that we come up with a single word when/if text spliting afterwards
val = "_".join(EMOTICONS[emo].replace(",","").split())
val='EMO_'+val
tweet = tweet.replace(emo, ' '+val+' ')
for emot in UNICODE_EMO:
val = "_".join(UNICODE_EMO[emot].replace(",","").replace(":","").split())
val='EMO_'+val
                tweet = tweet.replace(emot, ' '+val+' ')
if apply_only_2_consecutive_charac:
# Replace 3 or more consecutive letters by 2 letter.
tweet = re.sub(sequencePattern, seqReplacePattern, tweet)
if apply_non_alphabetical_removal:
# Replace all non alphabets.
tweet = re.sub(alphaPattern, " ", tweet)
tweetwords = ''
for word in tweet.split():
# Checking if the word is a stopword.
if apply_stopwords_removal:
if word in stopwords.words('english'):
word=''
else:
word=word
#if word not in stopwordlist:
if apply_shortwords_removal:
if len(word)<=1:
word=''
else:
word=word
# Lemmatizing the word.
if apply_lemmatizer:
word = wordLemm.lemmatize(word)
else:
word=word
tweetwords += (word+' ')
processedText.append(tweetwords)
return processedText
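# A quick sanity check of the preprocessing on a single made-up tweet (the tweet below is illustrative and not part of the dataset):
# In[ ]:
sample = pd.Series(["@some_user I loooove this!!! :) https://t.co/xyz #happy"])
preprocess_text(sample, apply_lemmatizer=False)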
# In[20]:
positive_text_prepro = preprocess_text(df['text'][df['sentiment']=='positive'], apply_lemmatizer=False, apply_non_alphabetical_removal=True)
# In[56]:
pd.Series(positive_text_prepro).head()
# In[21]:
neutral_text_prepro = preprocess_text(df['text'][df['sentiment']=='neutral'], apply_lemmatizer=False, apply_non_alphabetical_removal=True)
# In[58]:
pd.Series(neutral_text_prepro).head()
# In[22]:
negative_text_prepro = preprocess_text(df['text'][df['sentiment']=='negative'], apply_lemmatizer=False, apply_non_alphabetical_removal=True)
# In[59]:
pd.Series(negative_text_prepro).head()
# ### Keyword analysis of positive tweets
# The following function builds word clouds from a corpus
# In[23]:
def plotWc(text, stopwords=None, title=''):
wc = WordCloud(
stopwords=stopwords,
width=800,
height=400,
max_words=1000,
random_state=44,
background_color="white",
collocations=False
).generate(text)
plt.figure(figsize = (10,10))
plt.imshow(wc, interpolation="bilinear")
plt.axis("off")
plt.title(title)
plt.show()
# In[24]:
plotWc(" ".join(positive_text_prepro), stopwords=stopwords.words('english'), title = "Wordcloud des tweets positifs")
# Positive tweets are marked by a strong representation of words with a positive connotation: `love`, `good`, `happy`.
#
# This visual impression can be confirmed with a frequency plot of the most common individual words
# In[26]:
plot_freq_dist(create_corpus(pd.Series(positive_text_prepro)), title = 'Most common words associated with positive tweets')
# In[27]:
plot_freq_dist(create_corpus(pd.Series(positive_text_prepro)), ngram=2, title = 'Most common 2grams associated with positive tweets')
# In[28]:
plot_freq_dist(create_corpus(pd.Series(positive_text_prepro)), ngram=3, title = 'Most common 3grams associated with positive tweets')
# In[29]:
plot_freq_dist(create_corpus(pd.Series(positive_text_prepro)))
#from subprocess import Popen, check_call
#import os
import pandas as pd
import numpy as np
import math
import PySimpleGUI as sg
import webbrowser
# Read Data
csv_path1 = "output/final_data.csv"
prop_df = pd.read_csv(csv_path1)
n = prop_df.shape[0]
prop_df.sort_values(by=["PRICE"],ascending=True,inplace=True)
prop_df.index = range(len(prop_df.index))
prop_df_old = prop_df.copy()
# Read Languages
csvLanguage = "data_sets/languages_spoken.csv"
lang_df = pd.read_csv(csvLanguage)
languages = [lang for lang in lang_df.columns.tolist() if lang not in ["Community Area","Community Area Name","PREDOMINANT NON-ENGLISH LANGUAGE (%)","TOTAL"]]
languages.sort()
# Add locations
local = prop_df["LOCATION"].unique().tolist()
local.sort()
local = ["NONE"] + local
sg.theme('BluePurple')
# House Fact Column
col_fact = [
[sg.Text('Address:',size=(12,1)),sg.Text(size=(30,1), key='address')],
[sg.Text('Location:',size=(12,1)),sg.Text(size=(30,1), key='location')],
[sg.Text('Price:',size=(12,1)),sg.Text(size=(30,1),key='price')],
[sg.Text('HOA:',size=(12,1)),sg.Text(size=(30,1),key='hoa')],
[sg.Text('Tax Year:',size=(12,1)),sg.Text(size=(30,1),key='taxYear')],
[sg.Text('Tax Assessed:',size=(12,1)),sg.Text(size=(30,1),key='assessTax')],
[sg.Text('SquareFeet:',size=(12,1)),sg.Text(size=(30,1), key='sqft')],
[sg.Text('Year Built:',size=(12,1)),sg.Text(size=(30,1),key='year')]
]
col_fact2 = [
[sg.Text('# of Beds:',size=(20,1)),sg.Text(size=(12,1),key='beds')],
[sg.Text('# of Bathrooms:',size=(20,1)),sg.Text(size=(12,1),key='baths')],
[sg.Text('Sold Date:',size=(20,1)),sg.Text(size=(12,1),key='soldDT')],
[sg.Text('Sold Price:',size=(20,1)),sg.Text(size=(12,1),key='soldP')],
[sg.Text('Zestimate:',size=(20,1)),sg.Text(size=(12,1),key='zest')],
[sg.Text('Est Tax:',size=(20,1)),sg.Text(size=(12,1),key='estTax')],
[sg.Text('Property Type:',size=(20,1)),sg.Text(size=(12,1),key="propType")]
]
# Commute Column
col_commute1 = [
[sg.Text('Commute Time:',size=(14,1)),sg.Text(size=(10,1),key='kommute')],
[sg.Text('# of Transfers:',size=(14,1)),sg.Text(size=(10,1),key='kommuteTransfers')],
[sg.Text('Walking Time:',size=(14,1)),sg.Text(size=(10,1),key='kommuteWalk')]
]
col_commute2 = [
[sg.Frame(layout=[[sg.Listbox(values=[],size=(20,5),key='kommuteSteps')]],title="Commute Steps:",title_color="blue")]
]
# Grocery Column
col_grocery = [
[sg.Frame(layout=[[sg.Listbox(values=[],size=(30,5),key='storeWalk')]],title="Grocery Stores(walking):",title_color="blue"),
sg.Frame(layout=[[sg.Listbox(values=[],size=(30,5),key='storeDrive')]],title="Grocery Stores(driving):",title_color="blue") ]
]
# Crime Column
col_crime = [
[sg.Text('GUN',size=(10,1)),sg.Text(size=(10,1),key='crimeGun')],
[sg.Text('MURDER',size=(10,1)),sg.Text(size=(10,1),key='crimeMurder')],
[sg.Text('DRUG',size=(10,1)),sg.Text(size=(10,1),key='crimeDrug')],
[sg.Text('HUMAN',size=(10,1)),sg.Text(size=(10,1),key='crimeHuman')],
[sg.Text('THEFT',size=(10,1)),sg.Text(size=(10,1),key='crimeTheft')],
[sg.Text('OTHER',size=(10,1)),sg.Text(size=(10,1),key='crimeOther')]
]
# SocioEconomic Column
col_socio = [
[sg.Text('Percent of aged 25+ without HS diploma:',size=(30,1)),sg.Text(size=(8,1),key='hsDiploma')],
[sg.Text('Percent of households below poverty:',size=(30,1)),sg.Text(size=(8,1),key='homePoverty')],
[sg.Text('Percent of housing crowded:',size=(30,1)),sg.Text(size=(8,1),key='homeCrowded')],
[sg.Text('Percent of aged 16+ unemployed:',size=(30,1)),sg.Text(size=(8,1),key='unemployed')],
[sg.Text('Percent aged under 18 or over 64:',size=(30,1)),sg.Text(size=(8,1),key='aged')],
[sg.Text('Per capita income:',size=(30,1)),sg.Text(size=(8,1),key='income')]
]
# Language Column
col_language = [
[sg.Text('Select Language 1: '),
sg.InputCombo(tuple(languages), key='lang1', default_value="CHINESE", enable_events=True,size=(20, 1)),
sg.Text("",size=(10,1),key="perLang1")],
[sg.Text('Select Language 2: '),
sg.InputCombo(tuple(languages), key='lang2', default_value="SPANISH", enable_events=True,size=(20, 1)),
sg.Text("",size=(10,1),key="perLang2")],
[sg.Text('Select Language 3: '),
sg.InputCombo(tuple(languages), key='lang3', default_value="POLISH", enable_events=True,size=(20, 1)),
sg.Text("",size=(10,1),key="perLang3")],
[sg.Text('Select Language 4: '),
sg.InputCombo(tuple(languages), key='lang4', default_value="RUSSIAN", enable_events=True,size=(20, 1)),
sg.Text("",size=(10,1),key="perLang4")],
[sg.Text('Select Language 5: '),
sg.InputCombo(tuple(languages), key='lang5', default_value="AFRICAN LANGUAGES", enable_events=True,size=(20, 1)),
sg.Text("",size=(10,1),key="perLang5")],
[sg.Text('Select Language 6: '),
sg.InputCombo(tuple(languages), key='lang6', default_value="GREEK", enable_events=True,size=(20, 1)),
sg.Text("",size=(10,1),key="perLang6")]
]
# Button Column
col_button = [
[sg.Button('',image_filename="images/thumbsDown.png",image_size=(100,100),image_subsample=5,border_width=0,key="dislike"),sg.Text(' ' * 25),
sg.Button('',image_filename="images/unsure.png",image_size=(100,100),image_subsample=3,border_width=0,key="unsure"),sg.Text(' ' * 25),
sg.Button('',image_filename="images/thumbsUp.png",image_size=(100,100),image_subsample=5,border_width=0,key="like") ]
]
# Score Column
col_score = [
[sg.Text("Your Rating: ",size=(15,1)),sg.Text(size=(10,1),key="rate")],
[sg.Text("Predicted Score: ",size=(15,1)),sg.Text(size=(10,1),key="score")]
]
layout = [[sg.Text('Is this house Hot or Not?',font=('Helvetica', 20))],
[sg.Frame(layout=[[sg.Text('User Select: '),sg.InputCombo(('MM','XY'),size=(10,1),key='user',default_value='MM',enable_events=True)]],title="SELECT USER",title_color="blue"),
sg.Frame(layout=[[sg.Text("View Select: "),sg.InputCombo(('ALL','UNRATED', 'RATED'), key='userRated', default_value="ALL", enable_events=True,size=(20, 1))]],
title="RATING VIEW",title_color="blue")],
[sg.Text('Sort by: '),
sg.InputCombo(('COMMUTE_TIME','WALKING_TIME', 'PRICE'), key='sortBy', default_value="PRICE", enable_events=True,size=(20, 1)),
sg.Radio("Ascending",group_id="radio1",key="ascend",default=True,enable_events=True),
sg.Radio("Descending",group_id="radio1",key="descend",enable_events=True),
sg.Button('Save Work and Exit'),
sg.Text(" "*5),sg.Column(col_score,background_color="red")],
[sg.Text('Filter by Location: '),
sg.InputCombo(local,key='filter', default_value="NONE", enable_events=True,size=(20, 1))],
[sg.Frame(layout = [[sg.Listbox(values=prop_df["ADDRESS"],
size=(30, 12), key='-home-', enable_events=True)]],title="Home Selection:",title_color="blue"),
sg.Frame(layout = [[sg.Column(col_fact,background_color="grey"),
sg.Column(col_fact2,background_color="grey")]],title="General Information:",title_color="blue")
],
[sg.Frame(layout = [[sg.Column(col_commute1,background_color="purple"),
sg.Column(col_commute2,background_color="purple")]],title="Commute Information:",title_color="blue"),
sg.Frame(layout = [[sg.Column(col_grocery,background_color="blue")]],title="Grocery Information:",title_color="blue")],
[sg.Frame(layout = [[sg.Column(col_crime,background_color="green")]],title="Crime Statistics:",title_color="blue"),
sg.Frame(layout = [[sg.Column(col_socio,background_color="magenta")]],title="Socioeconomic Statistics:",title_color="blue"),
sg.Frame(layout = [[sg.Column(col_language,background_color="orange")]],title="Language Spoken (%)",title_color="blue")],
[sg.Column(col_button,justification="center")]
]
window = sg.Window('Housing Dating App', layout)
while True: # Event Loop
event, values = window.read()
print(event, values)
print("EVENT: ", event)
print("VALUE: ", values)
if event in ["-home-"]:
print(values["-home-"][0])
i = prop_df["ADDRESS"].tolist().index(values["-home-"][0])
if event in ['Save Work and Exit',None]:
break
if event in ['sortBy','ascend','descend']:
print("ITEM1: ",values['sortBy'])
prop_df.sort_values(by=[values['sortBy']],ascending=values['ascend'],inplace=True)
prop_df.index = range(len(prop_df.index))
window.Element("-home-").Update(prop_df["ADDRESS"])
if event in ['filter','userRated','user']:
print("ITEM1: ",values['filter'])
print("ITEM2: ",values['userRated'])
if values['filter'] in ["NONE"]:
if values['userRated'] in ['ALL']:
prop_df = prop_df_old.copy()
prop_df.sort_values(by=[values['sortBy']],ascending=values['ascend'],inplace=True)
prop_df.index = range(len(prop_df.index))
window.Element("-home-").Update(prop_df["ADDRESS"])
n = prop_df.shape[0]
elif values['userRated'] in ['UNRATED']:
prop_df = prop_df_old.loc[pd.isnull(prop_df_old[values['user']+"_RATING"])].copy()
prop_df.sort_values(by=[values['sortBy']],ascending=values['ascend'],inplace=True)
prop_df.index = range(len(prop_df.index))
window.Element("-home-").Update(prop_df["ADDRESS"])
n = prop_df.shape[0]
elif values['userRated'] in ['RATED']:
prop_df = prop_df_old.loc[pd.notnull(prop_df_old[values['user']+"_RATING"])].copy()
prop_df.sort_values(by=[values['sortBy']],ascending=values['ascend'],inplace=True)
prop_df.index = range(len(prop_df.index))
window.Element("-home-").Update(prop_df["ADDRESS"])
n = prop_df.shape[0]
else:
if values['userRated'] in ['ALL']:
prop_df = prop_df_old.loc[prop_df_old["LOCATION"] == values["filter"]].copy()
prop_df.sort_values(by=[values['sortBy']],ascending=values['ascend'],inplace=True)
prop_df.index = range(len(prop_df.index))
window.Element("-home-").Update(prop_df["ADDRESS"])
n = prop_df.shape[0]
elif values['userRated'] in ['UNRATED']:
prop_df = prop_df_old.loc[(prop_df_old["LOCATION"] == values["filter"]) & (pd.isnull(prop_df_old[values['user']+"_RATING"]))].copy()
prop_df.sort_values(by=[values['sortBy']],ascending=values['ascend'],inplace=True)
prop_df.index = range(len(prop_df.index))
window.Element("-home-").Update(prop_df["ADDRESS"])
n = prop_df.shape[0]
elif values['userRated'] in ['RATED']:
prop_df = prop_df_old.loc[(prop_df_old["LOCATION"] == values["filter"]) & (pd.notnull(prop_df_old[values['user']+"_RATING"]))].copy()
prop_df.sort_values(by=[values['sortBy']],ascending=values['ascend'],inplace=True)
prop_df.index = range(len(prop_df.index))
window.Element("-home-").Update(prop_df["ADDRESS"])
n = prop_df.shape[0]
if event in ["lang1"]:
window['perLang1'].update(str(f'{prop_df[values["lang1"]][i]/prop_df["TOTAL"][i]:.2%}'))
if event in ["lang2"]:
window['perLang2'].update(str(f'{prop_df[values["lang2"]][i]/prop_df["TOTAL"][i]:.2%}'))
if event in ["lang3"]:
window['perLang3'].update(str(f'{prop_df[values["lang3"]][i]/prop_df["TOTAL"][i]:.2%}'))
if event in ["lang4"]:
window['perLang4'].update(str(f'{prop_df[values["lang4"]][i]/prop_df["TOTAL"][i]:.2%}'))
if event in ["lang5"]:
window['perLang5'].update(str(f'{prop_df[values["lang5"]][i]/prop_df["TOTAL"][i]:.2%}'))
if event in ["lang6"]:
window['perLang6'].update(str(f'{prop_df[values["lang6"]][i]/prop_df["TOTAL"][i]:.2%}'))
if event in ["-home-","like","unsure","dislike"]:
if n > 0:
id = prop_df_old["ADDRESS"].tolist().index(prop_df["ADDRESS"][i])
if event == "like":
prop_df_old.at[id,values['user']+"_RATING"] = 3
if values['userRated'] in ['UNRATED']:
prop_df.drop(prop_df.index[i],inplace=True)
prop_df.index = range(len(prop_df.index))
n = prop_df.shape[0]
if i == n:
i = n-1
window.Element("-home-").Update(prop_df["ADDRESS"])
else:
prop_df.at[i,values['user']+"_RATING"] = 3
if i < n-1:
i += 1
if event == "unsure":
prop_df_old.at[id,values['user']+"_RATING"] = 2
if values['userRated'] in ['UNRATED']:
prop_df.drop(prop_df.index[i],inplace=True)
prop_df.index = range(len(prop_df.index))
n = prop_df.shape[0]
if i == n:
i = n-1
window.Element("-home-").Update(prop_df["ADDRESS"])
else:
prop_df.at[i,values['user']+"_RATING"] = 2
if i < n-1:
i += 1
if event == "dislike":
prop_df_old.at[id,values['user']+"_RATING"] = 1
if values['userRated'] in ['UNRATED']:
prop_df.drop(prop_df.index[i],inplace=True)
prop_df.index = range(len(prop_df.index))
n = prop_df.shape[0]
if i == n:
i = n-1
window.Element("-home-").Update(prop_df["ADDRESS"])
else:
prop_df.at[i,values['user']+"_RATING"] = 1
if i < n-1:
i += 1
window.Element("-home-").update(set_to_index=i,scroll_to_index=max(0,i-3))
if n > 0:
webbrowser.open(prop_df['URL'][i])
#call_url = prop_df['URL'][i]
#mycmd = r'start chrome /new-tab {}'.format(call_url)
#try:
# os.system("taskkill /F /IM chrome.exe")
#except:
# pass
#p1 = Popen(mycmd,shell=True)
window['address'].update(prop_df['ADDRESS'][i])
window['location'].update(prop_df['LOCATION'][i])
if pd.isnull(prop_df['SQFT'][i]):
window['sqft'].update("")
else:
window['sqft'].update(math.floor(prop_df['SQFT'][i]))
if pd.isnull(prop_df['YEAR'][i]):
window['year'].update("")
else:
window['year'].update(prop_df['YEAR'][i])
if pd.isnull(prop_df['LAST_SOLD_DATE'][i]):
window['soldDT'].update("")
else:
window['soldDT'].update(prop_df['LAST_SOLD_DATE'][i])
if | pd.isnull(prop_df["ZESTIMATE"][i]) | pandas.isnull |
import os
import zipfile as zp
import pandas as pd
import numpy as np
import core
import requests
class Labels:
init_cols = [
'station_id', 'station_name', 'riv_or_lake', 'hydroy', 'hydrom', 'day',
'lvl', 'flow', 'temp', 'month']
trans_cols = [
'date', 'year', 'month', 'day', 'hydroy', 'hydrom', 'station_id', 'station_name',
'riv_or_lake', 'riv_or_lake_id', 'lvl', 'flow', 'temp']
def transform(trans_df):
trans_df = trans_df.reset_index().drop('index', axis=1)
dfc = trans_df.copy()
lstrip = 'AĄBCĆDEĘFGHIJKLŁMNŃOÓPQRSŚTUVWXYZŹŻaąbcćdeęfghijklłmnńoópqrsśtuvwxyzźż( '
rivlakeid = dfc['riv_or_lake'].map(lambda x: x.lstrip(lstrip).rstrip(')'))
trans_df['riv_or_lake'] = trans_df['riv_or_lake'].map(lambda x: x.rstrip(' ()1234567890 '))
trans_df['riv_or_lake_id'] = rivlakeid
trans_df['month'] = trans_df['month'].fillna(method='ffill').astype(int)
trans_df['day'] = trans_df['day'].fillna(method='ffill').astype(int)
trans_df['year'] = trans_df['hydroy']
trans_df.loc[(trans_df['month'] == 11) | (trans_df['month'] == 12), 'year'] = trans_df['year'].astype(int) - 1
    trans_df['date'] = pd.to_datetime(trans_df[['year', 'month', 'day']])
# -*- coding: utf-8 -*-
# e_bb_retriever
# <NAME>
version = 'e_bb_retriever.v.9.0.0'
# Python modules
import os
import pickle as pic
import argparse
# External modules
import pandas as pd
# Local modules
from classes.libdesign import LibDesign
from classes.logger import Logger
if __name__ == '__main__':
# Arg parser
parser = argparse.ArgumentParser(description="""e_bb_retreaver collects all the building blocks for a giver
libDESIGN into csv files in the folder that is given through arguments. The input arguments are wfolder (the
folder system of eDESIGNER, tk: token, the combination of the Db_Run time stamp and run number separated by _,
dn: the design number or a list of design numbers and of: the output folder. There are two files created for each
cycle and named ddn_Cn.csv and dn_Cn_all.csv where n is the cycle number and ddn is the design number.
These files contain the building block smiles and ids that should be used to obtain the maximum size of the
library while maintaining the heavy atom distribution. The output folder will be created if it does not already
exist""")
parser.add_argument('-wf', '--wfolder', help='Working Folder', type=str, default='./')
parser.add_argument('-tk', '--token', help='combination of Db_Run time stamp and run number separated by _',
type=str, default=None)
parser.add_argument('-df', '--design_number',
help='number of the design of interest or a list of designs , separated', type=str,
default=None)
parser.add_argument('-of', '--output_folder', help='folder where the files will be saved', type=str, default=None)
args = parser.parse_args()
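    # Example invocation (token, design numbers and output folder below are illustrative):
    #   python e_bb_retriever.py -wf ./ -tk 20210101_1 -df 0,3 -of ./bb_out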
assert args.token is not None, 'A token must be provided'
assert args.design_number is not None, 'A design number or a list of design numbers must be provided'
assert args.output_folder is not None, 'An output folder must be provided'
args.design_number = args.design_number.split(',')
args.design_number = [item.strip(' ') for item in args.design_number]
args.design_number = [int(item) for item in args.design_number]
# Initialization
log = Logger(os.path.join(args.wfolder, 'logs', args.token + '_lib_design_interpreter.log'))
log.update(version)
try: # Try to create the output folder but do not delete it if it aready exists
os.mkdir(args.output_folder)
except:
pass
# Body of the script
log.update('Loading pickled designs to object...')
with open(os.path.join(args.wfolder, 'results', args.token + '_libDESIGNS.pic'), 'rb') as f:
lib_list = []
while True:
try:
lib_list.append(pic.load(f))
except: # reached the end of the file
break
log.update('Creating dataframes and dumping them into files...')
for lib_id in args.design_number:
design = lib_list[lib_id]
if design.id != lib_id:
log.update(f'Design {lib_id} in list does not match with design.id {design.id}')
else:
for cycle in range(design.n_cycles):
all_dfs = []
int_dfs = []
for bbt, limit in zip(design.bbts[cycle], design.int_limits[cycle]):
if limit > 0:
# It might be that a bbt does not have internal compounds but it does have all
# compounds and therefore the file for internal compounds does not exists.
# That is why we need to check for the number of compounds to avoid an I/O exception
df_int = pd.read_csv(os.path.join(args.wfolder, 'comps', args.token.split('_')[0], str(bbt) + '.int.smi'),
sep=' ', header=None, names=['smiles', 'id'], nrows=limit)
int_dfs.append(df_int.copy())
for bbt, limit in zip(design.bbts[cycle], design.all_limits[cycle]):
if limit > 0: # This should not be necessary, but just in case
df_all = pd.read_csv(os.path.join(args.wfolder, 'comps', args.token.split('_')[0], str(bbt) + '.smi'),
sep=' ', header=None, names=['smiles', 'id'], nrows=limit)
all_dfs.append(df_all.copy())
all_dfs = pd.concat(all_dfs)
                int_dfs = pd.concat(int_dfs)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 9 13:55:53 2021
@author: Clement
"""
import pandas
import geopandas as gpd
import numpy
import os
import sys
import datetime
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from gen_fct import file_fct
from gen_fct import df_fct
def last_update_db (dir_name, db_list):
list_dir, list_files = file_fct.list_dir_files(f'{dir_name}')
db_daily = db_list[db_list.loc[:,'update']==True]
if 'last_update.json' in list_files:
last_update = pandas.read_json(f'{dir_name}/last_update.json', orient = "table")
last_update['delta_day'] = last_update.apply(lambda x: (pandas.to_datetime('today')-x["date"]).days,axis=1)
print(last_update)
print('\n')
else:
        last_update = pandas.DataFrame(index=db_daily.index, columns=['date', 'delta_day'])
import pandas as pd
import numpy as np
import scipy
from scipy.stats import norm
from scipy.optimize import minimize
import ipywidgets as widgets
from IPython.display import display
def drawdown(return_series: pd.Series, amount: float = 1000):
"""
Takes a time series of asset returns
Computes and returns a DataFrame that contains:
the wealth index
the previous peaks
percent drawdowns
"""
wealth_index = amount * (1 + return_series).cumprod()
previous_peaks = wealth_index.cummax()
drawdowns = (wealth_index - previous_peaks) / previous_peaks
return pd.DataFrame({
'Wealth': wealth_index,
'Peaks': previous_peaks,
'Drawdowns': drawdowns
})
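# Minimal usage sketch (made-up monthly returns, not the EDHEC data used below):
#   rets = pd.Series([0.01, -0.02, 0.03, -0.01])
#   dd = drawdown(rets, amount=1000)
#   dd['Drawdowns'].min()   # deepest drawdown over the period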
def get_hfi_returns():
"""
Load and format the EDHEC Hedge Fund Index Returns
"""
hfi = pd.read_csv('data/edhec-hedgefundindices.csv',
header=0, index_col=0, parse_dates=True)
hfi = hfi/100
hfi.index = hfi.index.to_period('M')
return hfi
def get_ind_returns():
"""
    Load and format the Ken French 30 Industry Portfolios Value Weighted Monthly Returns
"""
ind = pd.read_csv('data/ind30_m_vw_rets.csv', header=0,
index_col=0, parse_dates=True) / 100
ind.index = pd.to_datetime(ind.index, format='%Y%m').to_period('M')
ind.columns = ind.columns.str.strip()
return ind
def get_total_market_index_returns():
"""
"""
ind_return = get_ind_returns()
ind_nfirms = get_ind_nfirms()
ind_size = get_ind_size()
ind_mktcap = ind_nfirms * ind_size
total_mktcap = ind_mktcap.sum(axis='columns')
ind_capweight = ind_mktcap.divide(total_mktcap, axis='rows')
total_market_return = (ind_capweight * ind_return).sum(axis='columns')
return total_market_return
def get_ind_nfirms():
"""
"""
ind = pd.read_csv('data/ind30_m_nfirms.csv', header=0,
index_col=0, parse_dates=True)
ind.index = pd.to_datetime(ind.index, format='%Y%m').to_period('M')
ind.columns = ind.columns.str.strip()
return ind
def get_ind_size():
"""
"""
ind = pd.read_csv('data/ind30_m_size.csv', header=0,
index_col=0, parse_dates=True)
ind.index = pd.to_datetime(ind.index, format='%Y%m').to_period('M')
ind.columns = ind.columns.str.strip()
return ind
def semideviation(r):
"""
Returns the semideviation aka negative semideviation of r
r must be a Series or a Dataframe
"""
is_negative = r < 0
return r[is_negative].std(ddof=0)
def skewness(r):
"""
Alternative to scipy.stats.skew()
Computes the skewness of the supplied Series or DataFrame Returns
a float or a Series
"""
demeaned_r = r - r.mean()
# use the population standard deviation, so set dof=0
sigma_r = r.std(ddof=0)
exp = (demeaned_r ** 3).mean()
return exp/sigma_r ** 3
def kurtosis(r):
"""
Alternative to scipy.stats.kurtosis()
Computes the kurtosis of the supplied Series or DataFrame Returns
a float or a Series
"""
demeaned_r = r - r.mean()
# use the population standard deviation, so set dof=0
sigma_r = r.std(ddof=0)
exp = (demeaned_r ** 4).mean()
return exp/sigma_r ** 4
def is_normal(r, level=0.01):
"""
Applies the Jarque-Bera test to determine if a Series is normal or not
Test is applied at the 1% level by default
Returns True if the hypothesis of normality is accepted, False otherwise
"""
statistic, p_value = scipy.stats.jarque_bera(r)
return p_value > level
def var_historic(r, level=5):
"""
Returns the historic Value at Risk at a specified level
i.e. returns the number such that "level" percent of the returns
    fall below that number, and the (100-level) percent are above
"""
if isinstance(r, pd.DataFrame):
return r.aggregate(var_historic, level=level)
elif isinstance(r, pd.Series):
return -np.percentile(r, level)
else:
raise TypeError('Expected r to be Series or DataFrame')
def var_gaussian(r, level=5, modified=False):
"""
Returns the Parametric Gaussian VaR of a Series or DataFrame
"""
# compute the Z score assuming it was Gaussian
z = norm.ppf(level/100)
if modified:
# modify the Z score based on observed skewness and kurtosis
s = skewness(r)
k = kurtosis(r)
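        # (Cornish-Fisher expansion of the Gaussian quantile)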
z = (z +
(z**2 - 1) * s/6 +
(z**3 - 3*z) * (k-3)/24 -
(2*z**3 - 5*z) * (s**2)/36
)
return -(r.mean() + z*r.std(ddof=0))
def cvar_historic(r, level=5):
"""
Computes the Conditional VaR of Series or DataFrame
"""
if isinstance(r, pd.Series):
is_beyond = r <= -var_historic(r, level=level)
return -r[is_beyond].mean()
elif isinstance(r, pd.DataFrame):
return r.aggregate(cvar_historic, level=level)
else:
raise TypeError('Expected r to be Series or DataFrame')
def annualized_return(r):
n_months = r.shape[0]
r = r / 100
annualized_return = (r + 1).prod() ** (12/n_months) - 1
return annualized_return
def annualized_volatility(r):
r = r / 100
return r.std() * np.sqrt(12)
def annualized_rets(r, periods_per_year):
"""
Annualizes a set of returns
We should infer the periods per year
"""
compunded_growth = (1 + r).prod()
n_periods = r.shape[0]
return compunded_growth ** (periods_per_year/n_periods) - 1
def annualized_vol(r, periods_per_year):
"""
Annualizes the volatility of a set of returns
We should infer the periods per year
"""
return r.std() * (periods_per_year ** 0.5)
def sharp_ratio(r, riskfree_rate, periods_per_year):
"""
Computes the annualized sharpe ratio of a set of returns
"""
rf_per_period = (1 + riskfree_rate) ** (1 / periods_per_year) - 1
excess_ret = r - rf_per_period
ann_ex_ret = annualized_rets(excess_ret, periods_per_year)
ann_vol = annualized_vol(r, periods_per_year)
return ann_ex_ret / ann_vol
def portfolio_return(weights, returns):
"""
Calculate portfolio returns
Weights -> Returns
"""
return weights.T @ returns
def portfolio_vol(weights, covmat):
"""
Calculate portfolio volatility
Weights -> Vol
"""
return (weights.T @ covmat @ weights) ** 0.5
def plot_ef2(n_points, er, cov, style='.-'):
"""
Plots the 2-asset efficient frontier
"""
    if er.shape[0] != 2 or cov.shape[0] != 2:
raise ValueError('plot_ef2 can only plot 2-asset frontiers')
weights = [np.array([w, 1-w]) for w in np.linspace(0, 1, n_points)]
rets = [portfolio_return(w, er) for w in weights]
vols = [portfolio_vol(w, cov) for w in weights]
ef = pd.DataFrame({
'Returns': rets,
'Volatility': vols
})
return ef.plot.line(x='Volatility', y='Returns', style=style)
def optimal_weights(n_points, er, cov):
"""
-> list of weights ro run the optimizer on to minimize the volatility
"""
target_rs = np.linspace(er.min(), er.max(), n_points)
weights = [minimize_vol(target_return, er, cov)
for target_return in target_rs]
return weights
def gmv(cov: pd.DataFrame) -> pd.DataFrame:
"""
Returns the weights of the Global Minimum Volatility portfolio
by given covariance matrix
"""
n = cov.shape[0]
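    # With identical expected returns the Sharpe-ratio numerator is constant,
    # so maximising the ratio reduces to minimising the portfolio volatility.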
return max_sharp_ratio(0, np.repeat(1, n), cov)
def plot_ef(n_points: int, er: pd.DataFrame, cov: pd.DataFrame, show_cml=False, style='.-', riskfree_rate=0, show_ew=False, show_gmv=False) -> pd.DataFrame:
"""
Plots the N-asset efficient frontier
show_ew = equally wighted
"""
weights = optimal_weights(n_points, er, cov)
rets = [portfolio_return(w, er) for w in weights]
vols = [portfolio_vol(w, cov) for w in weights]
ef = pd.DataFrame({
'Returns': rets,
'Volatility': vols
})
ax = ef.plot.line(x='Volatility', y='Returns', style=style)
if show_ew:
n = er.shape[0]
w_ew = np.repeat(1/n, n)
r_ew = portfolio_return(w_ew, er)
vol_ew = portfolio_vol(w_ew, cov)
# display EW
ax.plot([vol_ew], [r_ew], color='goldenrod', marker='o', markersize=10)
if show_gmv:
w_gmv = gmv(cov)
r_gmv = portfolio_return(w_gmv, er)
vol_gmv = portfolio_vol(w_gmv, cov)
# display GMV
ax.plot([vol_gmv], [r_gmv], color='midnightblue',
marker='o', markersize=10)
if show_cml:
ax.set_xlim(left=0)
weights_msr = max_sharp_ratio(riskfree_rate, er, cov)
returns_msr = portfolio_return(weights_msr, er)
volatility_msr = portfolio_vol(weights_msr, cov)
# Add Capital Market Line
cml_x = [0, volatility_msr]
cml_y = [riskfree_rate, returns_msr]
ax.plot(cml_x, cml_y, color='green', marker='o',
linestyle='dashed', markersize=12, linewidth=2)
return ax
def minimize_vol(target_return, er, cov):
"""
target_ret -> w
"""
n = er.shape[0]
init_guess = np.repeat(1/n, n)
bounds = ((0.0, 1.0),) * n
return_is_target = {
'type': 'eq',
'args': (er,),
'fun': lambda weights, er: target_return - portfolio_return(weights, er)
}
weights_sum_to_1 = {
'type': 'eq',
'fun': lambda weights: np.sum(weights) - 1
}
results = minimize(
portfolio_vol,
init_guess, args=(cov,),
method='SLSQP',
options={'disp': False},
constraints=(return_is_target, weights_sum_to_1), bounds=bounds)
return results.x
def negative_sharp_ratio(weights, riskfree_rate, er, cov):
"""
    Returns the negative of the Sharpe ratio, given weights
"""
r = portfolio_return(weights, er)
vol = portfolio_vol(weights, cov)
return -(r - riskfree_rate) / vol
def max_sharp_ratio(riskfree_rate, er, cov):
"""
    Returns the weights of the portfolio that gives you the maximum Sharpe ratio
given the riskfree rate and expected returns and a covariance matrix
"""
n = er.shape[0]
init_guess = np.repeat(1/n, n)
bounds = ((0.0, 1.0),) * n
weights_sum_to_1 = {
'type': 'eq',
'fun': lambda weights: np.sum(weights) - 1
}
results = minimize(
negative_sharp_ratio,
init_guess,
args=(riskfree_rate, er, cov,),
method='SLSQP',
options={'disp': False},
constraints=(weights_sum_to_1), bounds=bounds)
return results.x
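# Minimal sketch with hypothetical inputs: for er = pd.Series([0.10, 0.07]) and a
# 2x2 covariance DataFrame, max_sharp_ratio(0.03, er, cov) returns a length-2
# weight vector that sums to 1 and respects the [0, 1] bounds defined above.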
def run_cppi(risky_return, safe_return=None, multiplier=3, start=1000, floor=0.8, riskfree_rate=0.03, drawdown=None):
"""
Run a backtest of the CPPI strategy, given a set of returns for the risky asset
Returns a dictionary containing: Asset Value History, Risk Budget History, Risky Weight History
"""
# 1. Cushion - (Asset Value - Floor Value)
# 2. Compute allocation to the safe and risky assets -> m * risk_budget
# 3. Recompute the asset value based on the returns
# set up the CPPI parameters
dates = risky_return.index
number_steps = len(dates)
account_value = start
floor_value = start * floor
peak = start
if isinstance(risky_return, pd.Series):
risky_return = pd.DataFrame(risky_return, columns=['R'])
if safe_return is None:
safe_return = pd.DataFrame().reindex_like(risky_return)
# fast way to set all values to a number
safe_return.values[:] = riskfree_rate / 12
account_history = pd.DataFrame().reindex_like(risky_return)
cushion_history = | pd.DataFrame() | pandas.DataFrame |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas.errors import (
NullFrequencyError, OutOfBoundsDatetime, PerformanceWarning)
import pandas as pd
from pandas import (
DataFrame, DatetimeIndex, NaT, Series, Timedelta, TimedeltaIndex,
Timestamp, timedelta_range)
import pandas.util.testing as tm
def get_upcast_box(box, vector):
"""
Given two box-types, find the one that takes priority
"""
if box is DataFrame or isinstance(vector, DataFrame):
return DataFrame
if box is Series or isinstance(vector, Series):
return Series
if box is pd.Index or isinstance(vector, pd.Index):
return pd.Index
return box
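def test_get_upcast_box_priority():
    # Illustrative sanity check added for clarity (not part of the upstream
    # suite): DataFrame outranks Series, which outranks Index/ndarray.
    assert get_upcast_box(pd.Index, Series([1, 2])) is Series
    assert get_upcast_box(Series, DataFrame({"a": [1, 2]})) is DataFrame
    assert get_upcast_box(pd.Index, np.array([1, 2])) is pd.Index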
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Comparisons
class TestTimedelta64ArrayLikeComparisons:
# Comparison tests for timedelta64[ns] vectors fully parametrized over
# DataFrame/Series/TimedeltaIndex/TimedeltaArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_timedelta64_zerodim(self, box_with_array):
# GH#26689 should unbox when comparing with zerodim array
box = box_with_array
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
tdi = pd.timedelta_range('2H', periods=4)
other = np.array(tdi.to_numpy()[0])
tdi = tm.box_expected(tdi, box)
res = tdi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(res, expected)
with pytest.raises(TypeError):
# zero-dim of wrong dtype should still raise
tdi >= np.array(4)
class TestTimedelta64ArrayComparisons:
# TODO: All of these need to be parametrized over box
def test_compare_timedelta_series(self):
# regression test for GH#5963
s = pd.Series([timedelta(days=1), timedelta(days=2)])
actual = s > timedelta(days=1)
expected = pd.Series([False, True])
tm.assert_series_equal(actual, expected)
def test_tdi_cmp_str_invalid(self, box_with_array):
# GH#13624
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
tdi = TimedeltaIndex(['1 day', '2 days'])
tdarr = tm.box_expected(tdi, box_with_array)
for left, right in [(tdarr, 'a'), ('a', tdarr)]:
with pytest.raises(TypeError):
left > right
with pytest.raises(TypeError):
left >= right
with pytest.raises(TypeError):
left < right
with pytest.raises(TypeError):
left <= right
result = left == right
expected = np.array([False, False], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = left != right
expected = np.array([True, True], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize('dtype', [None, object])
def test_comp_nat(self, dtype):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = rhs != lhs
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == rhs, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(lhs != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != lhs, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > lhs, expected)
def test_comparisons_nat(self):
tdidx1 = pd.TimedeltaIndex(['1 day', pd.NaT, '1 day 00:00:01', pd.NaT,
'1 day 00:00:01', '5 day 00:00:03'])
tdidx2 = pd.TimedeltaIndex(['2 day', '2 day', pd.NaT, pd.NaT,
'1 day 00:00:02', '5 days 00:00:03'])
tdarr = np.array([np.timedelta64(2, 'D'),
np.timedelta64(2, 'D'), np.timedelta64('nat'),
np.timedelta64('nat'),
np.timedelta64(1, 'D') + np.timedelta64(2, 's'),
np.timedelta64(5, 'D') + np.timedelta64(3, 's')])
cases = [(tdidx1, tdidx2), (tdidx1, tdarr)]
        # Check pd.NaT is handled the same as np.nan
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
# TODO: better name
def test_comparisons_coverage(self):
rng = timedelta_range('1 days', periods=10)
result = rng < rng[3]
expected = np.array([True, True, True] + [False] * 7)
tm.assert_numpy_array_equal(result, expected)
# raise TypeError for now
with pytest.raises(TypeError):
rng < rng[3].value
result = rng == list(rng)
exp = rng == rng
tm.assert_numpy_array_equal(result, exp)
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedelta64ArithmeticUnsorted:
# Tests moved from type-specific test files but not
# yet sorted/parametrized/de-duplicated
def test_ufunc_coercions(self):
# normal ops are also tested in tseries/test_timedeltas.py
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [idx * 2, np.multiply(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['4H', '8H', '12H', '16H', '20H'],
freq='4H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '4H'
for result in [idx / 2, np.divide(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['1H', '2H', '3H', '4H', '5H'],
freq='H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'H'
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [-idx, np.negative(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['-2H', '-4H', '-6H', '-8H', '-10H'],
freq='-2H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '-2H'
idx = TimedeltaIndex(['-2H', '-1H', '0H', '1H', '2H'],
freq='H', name='x')
for result in [abs(idx), np.absolute(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['2H', '1H', '0H', '1H', '2H'],
freq=None, name='x')
tm.assert_index_equal(result, exp)
assert result.freq is None
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
msg = "cannot subtract a datelike from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi - dt
with pytest.raises(TypeError, match=msg):
tdi - dti
msg = (r"descriptor '__sub__' requires a 'datetime\.datetime' object"
" but received a 'Timedelta'")
with pytest.raises(TypeError, match=msg):
td - dt
msg = "bad operand type for unary -: 'DatetimeArray'"
with pytest.raises(TypeError, match=msg):
td - dti
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = pd.date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = pd.date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
assert result == expected
assert isinstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
dt_tz - ts
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt_tz - dt
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
dt_tz - ts_tz2
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt - dt_tz
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
ts - dt_tz
with pytest.raises(TypeError, match=msg):
ts_tz2 - ts
with pytest.raises(TypeError, match=msg):
ts_tz2 - dt
with pytest.raises(TypeError, match=msg):
ts_tz - ts_tz2
# with dti
with pytest.raises(TypeError, match=msg):
dti - ts_tz
with pytest.raises(TypeError, match=msg):
dti_tz - ts
with pytest.raises(TypeError, match=msg):
dti_tz - ts_tz2
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
msg = "cannot add indices of unequal length"
with pytest.raises(ValueError, match=msg):
tdi + dti[0:1]
with pytest.raises(ValueError, match=msg):
tdi[0:1] + dti
# random indexes
with pytest.raises(NullFrequencyError):
tdi + pd.Int64Index([1, 2, 3])
# this is a union!
# pytest.raises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
assert result == expected
result = td + dt
expected = Timestamp('20130102')
assert result == expected
# TODO: Needs more informative name, probably split up into
# more targeted tests
@pytest.mark.parametrize('freq', ['D', 'B'])
def test_timedelta(self, freq):
index = pd.date_range('1/1/2000', periods=50, freq=freq)
shifted = index + timedelta(1)
back = shifted + timedelta(-1)
tm.assert_index_equal(index, back)
if freq == 'D':
expected = pd.tseries.offsets.Day(1)
assert index.freq == expected
assert shifted.freq == expected
assert back.freq == expected
else: # freq == 'B'
assert index.freq == pd.tseries.offsets.BusinessDay(1)
assert shifted.freq is None
assert back.freq == pd.tseries.offsets.BusinessDay(1)
result = index - timedelta(1)
expected = index + timedelta(-1)
tm.assert_index_equal(result, expected)
# GH#4134, buggy with timedeltas
rng = pd.date_range('2013', '2014')
s = Series(rng)
result1 = rng - pd.offsets.Hour(1)
result2 = DatetimeIndex(s - np.timedelta64(100000000))
result3 = rng - np.timedelta64(100000000)
result4 = DatetimeIndex(s - pd.offsets.Hour(1))
tm.assert_index_equal(result1, result4)
tm.assert_index_equal(result2, result3)
class TestAddSubNaTMasking:
# TODO: parametrize over boxes
def test_tdi_add_timestamp_nat_masking(self):
# GH#17991 checking for overflow-masking with NaT
tdinat = pd.to_timedelta(['24658 days 11:15:00', 'NaT'])
tsneg = Timestamp('1950-01-01')
ts_neg_variants = [tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype('datetime64[ns]'),
tsneg.to_datetime64().astype('datetime64[D]')]
tspos = Timestamp('1980-01-01')
ts_pos_variants = [tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype('datetime64[ns]'),
tspos.to_datetime64().astype('datetime64[D]')]
for variant in ts_neg_variants + ts_pos_variants:
res = tdinat + variant
assert res[1] is pd.NaT
def test_tdi_add_overflow(self):
# See GH#14068
# preliminary test scalar analogue of vectorized tests below
with pytest.raises(OutOfBoundsDatetime):
pd.to_timedelta(106580, 'D') + Timestamp('2000')
with pytest.raises(OutOfBoundsDatetime):
Timestamp('2000') + pd.to_timedelta(106580, 'D')
_NaT = int(pd.NaT) + 1
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([106580], 'D') + Timestamp('2000')
with pytest.raises(OverflowError, match=msg):
Timestamp('2000') + pd.to_timedelta([106580], 'D')
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([_NaT]) - Timedelta('1 days')
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta(['5 days', _NaT]) - Timedelta('1 days')
with pytest.raises(OverflowError, match=msg):
(pd.to_timedelta([_NaT, '5 days', '1 hours']) -
pd.to_timedelta(['7 seconds', _NaT, '4 hours']))
# These should not overflow!
exp = TimedeltaIndex([pd.NaT])
result = pd.to_timedelta([pd.NaT]) - Timedelta('1 days')
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex(['4 days', pd.NaT])
result = pd.to_timedelta(['5 days', pd.NaT]) - Timedelta('1 days')
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex([pd.NaT, pd.NaT, '5 hours'])
result = (pd.to_timedelta([pd.NaT, '5 days', '1 hours']) +
pd.to_timedelta(['7 seconds', pd.NaT, '4 hours']))
tm.assert_index_equal(result, exp)
class TestTimedeltaArraylikeAddSubOps:
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# TODO: moved from frame tests; needs parametrization/de-duplication
def test_td64_df_add_int_frame(self):
# GH#22696 Check that we don't dispatch to numpy implementation,
# which treats int64 as m8[ns]
tdi = pd.timedelta_range('1', periods=3)
df = tdi.to_frame()
other = | pd.DataFrame([1, 2, 3], index=tdi) | pandas.DataFrame |
#!/usr/bin/env python3
# SPDX-License-Identifier: BSD-3-Clause-Clear
# Copyright (c) 2019, The Numerical Algorithms Group, Ltd. All rights reserved.
"""Shared routines for different Metric Sets
"""
from warnings import warn
import numpy
import pandas
from ..trace import Trace
from ..traceset import TraceSet
from .._plotsettings import pypop_mpl_params, figparams
__all__ = ["Metric", "MetricSet"]
class Metric:
"""Individual performance metrics to be used within a metricset. Defines metric name,
properties and method of calculation.
"""
def __init__(
self,
key,
level,
displayname=None,
desc=None,
is_inefficiency=False,
freq_corr=False,
):
"""
Parameters
----------
key: str
Key by which to identify metric.
level: int
Level at which to display metric in the stack.
displayname: str or None
Display name to use for metric in table etc. Defaults to key.
desc: str or None
Detailed description of the metric.
is_inefficiency: bool
Tag metric as an inefficiency (rather than efficiency) for correct display
and shading. Default False.
freq_corr: bool
Correct performance metrics based on average clock frequency (use to
correct for node dynamic clocking issues). Default False.
"""
self.key = key
self.level = level
self.description = str(desc) if desc else ""
self.is_inefficiency = is_inefficiency
if displayname:
self.displayname = r"↪ " * bool(self.level) + displayname
else:
self.displayname = r"↪ " * bool(self.level) + self.key
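# Example (sketch only; the keys and descriptions are made up): a MetricSet
# subclass typically populates self._metric_list with instances such as
#   Metric("Parallel Efficiency", level=0, desc="Overall parallel efficiency")
#   Metric("Load Balance", level=1, desc="Efficiency lost to load imbalance")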
class MetricSet:
"""Calculate and plot POP MPI metrics
Statistics data is expected to have been produced with `collect_statistics()`
Attributes
----------
metric_data
metric_definition
"""
_programming_model = None
_default_metric_key = "Number of Processes"
_default_group_key = None
_default_scaling_key = "Total Threads"
_key_descriptions = {
"Number of Processes": "",
"Threads per Process": "",
"Total Threads": "",
"Hybrid Layout": "",
"Tag": "",
}
def __init__(self, stats_data, ref_key=None, sort_keys=True):
"""
Parameters
----------
stats_data: TraceSet instance, dict, iterable or instance of Trace
Statistics as collected with `collect_statistics()`. Dictionary keys will be
used as the dataframe index. If a list, a dict will be constructed by
enumeration.
ref_key: str or None
Key of stats_dict that should be used as the reference for calculation of
scaling values. By default the trace with smallest number of processes and
smallest number of threads per process will be used.
sort_keys: bool
If true (default), lexically sort the keys in the returned DataFrame.
"""
self._stats_dict = MetricSet._dictify_stats(stats_data)
self._metric_data = None
self._sort_keys = sort_keys
self._ref_key = (
self._choose_ref_key(self._stats_dict) if ref_key is None else ref_key
)
def _calculate_metrics(self):
raise NotImplementedError
def _repr_html_(self):
return self.metric_data._repr_html_()
@staticmethod
def _choose_ref_key(stats_dict):
""" Take the stats dict and choose an appropriate reference trace.
As a default choice choose the smallest number of total threads, breaking ties
with smallest number of threads per process
"""
return min(
stats_dict.items(),
key=lambda x: "{:05}_{:05}_{}".format(
sum(x[1].metadata.threads_per_process),
max(x[1].metadata.threads_per_process),
x[1].metadata.tag,
),
)[0]
@property
def metric_data(self):
"""pandas.DataFrame: Calculated metric data.
"""
if self._metric_data is None:
self._calculate_metrics(ref_key=self._ref_key)
return self._metric_data
@staticmethod
def _dictify_stats(stats_data):
if isinstance(stats_data, TraceSet):
return {k: v for k, v in enumerate(stats_data.traces)}
else:
if isinstance(stats_data, Trace):
return {0: stats_data}
if not isinstance(stats_data, dict):
stats_data = {k: v for k, v in enumerate(stats_data)}
for df in stats_data.values():
if not isinstance(df, Trace):
raise ValueError("stats_dict must be an iterable of pypop.trace.Trace")
return stats_data
@property
def metrics(self):
"""List of :py:class:`pypop.metrics.Metric`: List of metrics that will be
calculated.
"""
return self._metric_list
def _create_subdataframe(self, metadata, idxkey):
if len(set(metadata.threads_per_process)) != 1:
warn(
"The supplied trace has a varying number of threads per process. "
"The PyPOP metrics were designed assuming a homogenous number of "
"threads per process -- analysis results may be inaccurate."
)
layout_keys = {
"Number of Processes": pandas.Series(
data=[metadata.num_processes], index=[idxkey]
),
"Threads per Process": pandas.Series(
data=[metadata.threads_per_process[0]], index=[idxkey]
),
"Total Threads": pandas.Series(
data=[sum(metadata.threads_per_process)], index=[idxkey]
),
"Hybrid Layout": pandas.Series(
data=[
"{}x{}".format(
metadata.num_processes, metadata.threads_per_process[0]
)
],
index=[idxkey],
),
"Tag": pandas.Series(data=[metadata.tag], index=[idxkey]),
}
for metric in self._metric_list:
layout_keys[metric.key] = | pandas.Series(data=[0.0], index=[idxkey]) | pandas.Series |
from IMLearn.utils import split_train_test
from IMLearn.learners.regressors import LinearRegression
from typing import NoReturn
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
import plotly.io as pio
pio.templates.default = "simple_white"
def load_data(filename: str):
"""
Load house prices dataset and preprocess data.
Parameters
----------
filename: str
Path to house prices dataset
Returns
-------
Design matrix and response vector (prices) - either as a single
DataFrame or a Tuple[DataFrame, Series]
"""
df = pd.read_csv(filename).drop_duplicates().dropna()
# Drop not needed features
df.drop(['id'], axis=1, inplace=True)
df.drop(['lat'], axis=1, inplace=True)
df.drop(['long'], axis=1, inplace=True)
# Edit date to datetime pandas
df['date'] = pd.to_datetime(df['date'], format="%Y%m%dT%f", errors='coerce')
# only positive numbers
lst = ["price", "sqft_living", "sqft_lot", "sqft_above", "yr_built",
"sqft_living15", "sqft_lot15", "bathrooms",
"floors"]
for feature in lst:
df = df[df[feature] > 0]
# checks where there is a basement
df['has_basement'] = np.where(df['sqft_basement'] > 0, 1, 0)
# renovated in the last 10 years
df['new_renovation'] = np.where( | pd.DatetimeIndex(df['date']) | pandas.DatetimeIndex |
import matplotlib.pyplot as plt
import os
import numpy as np
import pandas as pd
from matplotlib import cm
# import matplotlib
from adjustText import adjust_text
import re
import matplotlib.patheffects as pe
import scipy.stats as st
# deprecated
def plot_hist_exp_1(results, household_size, pool_size, prevalence):
fnr_indep = results[:, 0]
fnr_correlated = results[:, 1]
eff_indep = results[:, 2]
eff_correlated = results[:, 3]
test_indep = results[:, 4]
test_correlated = results[:, 5]
fig, [ax0, ax1] = plt.subplots(1,2, figsize=(10,6))
ax0.hist([fnr_indep, fnr_correlated], label=['naive pooling', 'correlated pooling'], color=['mediumaquamarine', 'mediumpurple'])
ax0.legend(loc='upper right')
ax0.set_xlabel('$FNR$')
ax0.set_ylabel('Frequency')
ax0.set_title('FNR values under naive and\ncorrelated pooling')
ax1.hist(fnr_indep - fnr_correlated, color='lightskyblue', rwidth=0.7)
ax1.set_title('difference in FNR values')
ax1.set_ylabel('Frequency')
plt.tight_layout()
plt.savefig('../figs/experiment_1/fnr_diff_pool-size={}_household-size={}_prevalence={}.pdf'.format(pool_size, household_size, prevalence))
plt.close()
fig, [ax0, ax1] = plt.subplots(1,2, figsize=(10,6))
ax0.hist([test_indep, test_correlated], label=['naive pooling', 'correlated pooling'], color=['mediumaquamarine', 'mediumpurple'])
ax0.legend(loc='upper right')
ax0.set_xlabel('$\#$ followup tests per positive identified')
ax0.set_ylabel('Frequency')
ax0.set_title('$\#$ followup tests per positive identified under\nnaive and correlated pooling')
ax1.hist(test_indep - test_correlated, color='lightskyblue', rwidth=0.7)
ax1.set_title('difference in $\#$ followup tests per positive identified')
ax1.set_ylabel('Frequency')
plt.tight_layout()
plt.savefig('../figs/experiment_1/relative_test_consumption_pool-size={}_household-size={}_prevalence={}.pdf'.format(pool_size, household_size, prevalence))
plt.close()
return
# deprecated
def generate_heatmap_plots_for_exp_1():
dir = '../results/experiment_1'
aggregate_results = {}
for filename in os.listdir(dir):
if filename == ".DS_Store" or not filename.endswith('.data'):
continue
parts = re.split('=|[.](?!\d)|_', filename)
print(parts)
household_size = int(parts[4])
prevalence = float(parts[6])
filedir = os.path.join(dir, filename)
with open(filedir) as f:
results = np.loadtxt(f)
avgs = np.mean(results, axis=0)
aggregate_results[(prevalence, household_size)] = avgs
df_agg = pd.DataFrame.from_dict(aggregate_results, orient='index', columns=['indep fnr', 'corr fnr', 'indep eff', 'corr eff', 'indep test', 'corr test'])
df_agg.index = pd.MultiIndex.from_tuples(df_agg.index, names=['prevalence', 'household size'])
df_agg = df_agg.reset_index()
df_agg = df_agg.sort_values(by=['prevalence', 'household size'])
df_agg['indep sn'] = 1 - df_agg['indep fnr']
df_agg['corr sn'] = 1 - df_agg['corr fnr']
df_agg['sn diff'] = df_agg['corr sn'] - df_agg['indep sn']
df_agg['rel test consumption'] = df_agg['corr test'] / df_agg['indep test']
fig, [ax0, ax1] = plt.subplots(1, 2, figsize=(8, 4))
table_sn = pd.pivot_table(df_agg, values='sn diff', index=['household size'], columns=['prevalence'])
print(table_sn)
heatmap = ax0.pcolor(table_sn, cmap=cm.BuPu)
ax0.set_aspect('equal')
ax0.set_yticks(np.arange(0.5, len(table_sn.index), 1))
ax0.set_yticklabels(table_sn.index)
ax0.set_xticks(np.arange(0.5, len(table_sn.columns), 1))
ax0.set_xticklabels(table_sn.columns)
ax0.set_xlabel('prevalence')
ax0.set_ylabel('household size')
ax0.set_title('Difference in FNR')
fig.colorbar(heatmap, ax=ax0, orientation="horizontal")
table_test = pd.pivot_table(df_agg, values='rel test consumption', index=['household size'], columns=['prevalence'])
heatmap = ax1.pcolor(table_test, cmap=cm.YlGn_r)
ax1.set_aspect('equal')
ax1.set_yticks(np.arange(0.5, len(table_test.index), 1))
ax1.set_yticklabels(table_test.index)
ax1.set_xticks(np.arange(0.5, len(table_test.columns), 1))
ax1.set_xticklabels(table_test.columns)
ax1.set_xlabel('prevalence')
ax1.set_ylabel('household size')
ax1.set_title('Relative test consumption')
fig.colorbar(heatmap, ax=ax1, orientation="horizontal")
fig.tight_layout()
fig.savefig('../figs/experiment_1/tmp_heapmap_for_fnr_and_test.pdf', bbox_inches='tight')
plt.clf()
return
def plot_hist_exp_2(results, param, val=None):
fnr_indep = results[:, 0]
fnr_correlated = results[:, 1]
eff_indep = results[:, 2]
eff_correlated = results[:, 3]
# print Sn (naive), Sn (correlated), Eff (naive), Eff (correlated)
num_iters = results.shape[0]
pool_size = 6.
f = open(f"../results/experiment_2/nominal_scenario_results_{num_iters}.txt", "w")
f.write(f"sensitivity: {1 - np.mean(fnr_indep):.1%} (naive), {1 - np.mean(fnr_correlated):.1%} (correlated);\
efficiency: {np.mean(eff_indep):.2f} (naive), {np.mean(eff_correlated):.2f} (correlated)\n")
f.write(f"standard error: {np.std(fnr_indep)/np.sqrt(num_iters)}, {np.std(fnr_correlated)/np.sqrt(num_iters)}, \
{np.std(eff_indep)/np.sqrt(num_iters)}, {np.std(eff_correlated)/np.sqrt(num_iters)}\n")
f.write(f"improvement: {(1 - np.mean(fnr_correlated)) / (1 - np.mean(fnr_indep))-1:.2%} (sensitivity); \
{np.mean(eff_correlated) / np.mean(eff_indep)-1:.2%} (efficiency)\n")
frac_sample_indiv_test_naive = 1 / np.mean(eff_indep) - 1 / pool_size
frac_sample_indiv_test_correlated = 1 / np.mean(eff_correlated) - 1 / pool_size
frac_positive_sample_indiv_test_naive = 0.01 * (1 - np.mean(fnr_indep)) / 0.95
frac_positive_sample_indiv_test_correlated = 0.01 * (1 - np.mean(fnr_correlated)) / 0.95
frac_negative_sample_indiv_test_naive = frac_sample_indiv_test_naive - frac_positive_sample_indiv_test_naive
frac_negative_sample_indiv_test_correlated = frac_sample_indiv_test_correlated - frac_positive_sample_indiv_test_correlated
f.write(f"fraction of samples tested individually: {frac_sample_indiv_test_naive:.2%} (naive), {frac_sample_indiv_test_correlated:.2%} (correlated)\n")
f.write(f"fraction of positive samples tested individually: {frac_positive_sample_indiv_test_naive:.2%} (naive), {frac_positive_sample_indiv_test_correlated:.2%} (correlated)\n")
f.write(f"fraction of negative samples tested individually: {frac_negative_sample_indiv_test_naive:.2%} (naive), {frac_negative_sample_indiv_test_correlated:.2%} (correlated)\n")
f.write(f"implied FPR: {frac_negative_sample_indiv_test_naive * 0.0001} (naive), {frac_negative_sample_indiv_test_correlated * 0.0001} (correlated)\n")
f.close()
ax1 = plt.subplot(111)
n, bins, patches = ax1.hist(results[:, :2], label=['naive', 'correlated'], color=['mediumaquamarine', 'mediumpurple'])
hatches = [".", '//']
for patch_set, hatch in zip(patches, hatches):
for patch in patch_set.patches:
patch.set_hatch(hatch)
patch.set_edgecolor('k')
plt.legend(loc='upper right')
plt.xlabel('False negative rate')
plt.ylabel('Frequency')
if param == 'nominal':
plt.title('Histogram of FNR values under {} scenario'.format(param))
plt.savefig('../figs/experiment_2/fnr_{}_scenario.pdf'.format(param))
else:
plt.title('Histogram of FNR values for one-stage group testing \n under {} = {}'.format(param, val))
plt.savefig('../figs/experiment_2/fnr_{}={}.pdf'.format(param, val), dpi=600)
plt.close()
ax2 = plt.subplot(111)
n, bins, patches = ax2.hist(results[:, 2:], label=['naive', 'correlated'], color=['mediumaquamarine', 'mediumpurple'])
hatches = ["..", '//']
for patch_set, hatch in zip(patches, hatches):
for patch in patch_set.patches:
patch.set_hatch(hatch)
plt.legend(loc='upper right')
plt.xlabel('Efficiency')
plt.ylabel('Frequency')
if param == 'nominal':
plt.title('Histogram of testing efficiency under {} scenario'.format(param))
plt.savefig('../figs/experiment_2/eff_{}_scenario.pdf'.format(param))
else:
plt.title('Histogram of testing efficiency for one-stage group testing \n under {} = {}'.format(param, val))
plt.savefig('../figs/experiment_2/eff_{}={}.pdf'.format(param, val), dpi=600)
plt.close()
return
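# Usage sketch (assumed shapes): `results` is an (n_iterations, 4) array whose
# columns are [FNR naive, FNR correlated, efficiency naive, efficiency correlated],
# e.g. loaded with np.loadtxt as in the helpers below, then plotted with
#   plot_hist_exp_2(results, 'nominal')
#   plot_hist_exp_2(results, 'prevalence', val=0.01)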
def generate_sensitivity_plots(param):
dir = '../results/experiment_2/sensitivity_analysis_2000/'
fnr_indep = []
fnr_corr = []
eff_indep = []
eff_corr = []
index = []
for filename in os.listdir(dir):
if param in filename:
val = filename.split(param, 1)[1][:-5]
val = val.split('_', 1)[0][1:]
val = int(val) if param == 'pool size' else val if param == 'household dist' else float(val)
filedir = os.path.join(dir, filename)
with open(filedir) as f:
results = np.loadtxt(f)
avgs = np.mean(results, axis=0)
fnr_indep.append(avgs[0])
fnr_corr.append(avgs[1])
eff_indep.append(avgs[2])
eff_corr.append(avgs[3])
index.append(val)
df = pd.DataFrame({'FNR (naive)': fnr_indep, 'FNR (correlated)': fnr_corr, 'efficiency (naive)': eff_indep,'efficiency (correlated)': eff_corr}, index=index)
df = df.sort_index()
df = df.rename_axis(param).reset_index()
df['sensitivity (naive)'] = 1 - df['FNR (naive)']
df['sensitivity (correlated)'] = 1 - df['FNR (correlated)']
fig, ax = plt.subplots()
ax2 = ax.twinx()
#fnrs = df[['FNR (naive)', 'FNR (correlated)']].plot.bar(ax=ax, legend=False, color=['mediumaquamarine', 'mediumpurple'], alpha=1)
sns = df[['sensitivity (naive)', 'sensitivity (correlated)']].plot.bar(ax=ax, legend=False, color=['mediumaquamarine', 'mediumpurple'], alpha=1)
l = df.shape[0]
bars = ax.patches
hatches = [".."] * l + ['//'] * l
for bar, hatch in zip(bars, hatches):
bar.set_hatch(hatch)
df[['efficiency (naive)']].plot.line(ax=ax2, legend=False, marker='^', markeredgecolor='w', markeredgewidth=0, \
color=['mediumaquamarine'], path_effects=[pe.Stroke(linewidth=3, foreground='w'), pe.Normal()])
df[['efficiency (correlated)']].plot.line(ax=ax2, legend=False, marker='o', markeredgecolor='w', markeredgewidth=0, \
color=['mediumpurple'], path_effects=[pe.Stroke(linewidth=3, foreground='w'), pe.Normal()])
ax.set_xticklabels(df[param])
ax.set_ylabel('sensitivity')
ax.set_ylim(0.6)
ax2.set_ylabel('efficiency')
ax2.set_ylim(1) if param in ['prevalence', 'pool size'] else ax2.set_ylim(4.5)
if param == 'FNR':
ax.set_xlabel('population-average individual test FNR')
elif param == 'household dist':
ax.set_xlabel('household size distribution')
else:
ax.set_xlabel(param)
h, l = ax.get_legend_handles_labels()
h2, l2 = ax2.get_legend_handles_labels()
ax.legend(h + h2, l + l2, loc='lower left', bbox_to_anchor=(0, 1.02, 0.6, 1.02), ncol=2)
fig.savefig('../figs/experiment_2/sensitivity_plots/sensitivity_for_{}_new.pdf'.format(param), bbox_inches='tight', dpi=600)
plt.clf()
return
def generate_pareto_fontier_plots():
dir = '../results/experiment_2/pareto_analysis_2000/'
aggregate_results = {}
for filename in os.listdir(dir):
if filename == ".DS_Store":
continue
parts = re.split('=|[.](?!\d)|_', filename)
prev = float(parts[2])
pool_size = int(parts[4])
filedir = os.path.join(dir, filename)
with open(filedir) as f:
results = np.loadtxt(f)
avgs = np.mean(results, axis=0)
aggregate_results[(prev, pool_size)] = avgs
df_agg = pd.DataFrame.from_dict(aggregate_results, orient='index', columns=['fnr (naive)', 'fnr (correlated)', 'eff (naive)', 'eff (correlated)'])
df_agg.index = pd.MultiIndex.from_tuples(df_agg.index, names=['prevalence', 'pool size'])
df_agg = df_agg.reset_index()
df_agg = df_agg.sort_values(by=['prevalence', 'pool size'])
df_agg['sn (naive)'] = 1 - df_agg['fnr (naive)']
df_agg['sn (correlated)'] = 1 - df_agg['fnr (correlated)']
for prev in df_agg['prevalence'].unique():
df = df_agg[df_agg['prevalence'] == prev]
ax = df.sort_values(by='pool size').plot(x='sn (naive)', y = 'eff (naive)', sort_columns=True, color='mediumpurple', marker='^', style='--')
df.sort_values(by='pool size').plot(x='sn (correlated)', y = 'eff (correlated)', sort_columns=True, ax=ax, color='mediumaquamarine', marker='o', style='-')
texts = []
for i, point in df.iterrows():
texts.append(ax.text(point['sn (naive)'], point['eff (naive)'], str(int(point['pool size'])), color='dimgrey'))
texts.append(ax.text(point['sn (correlated)'], point['eff (correlated)'], str(int(point['pool size'])), color='dimgrey'))
adjust_text(texts, only_move={'points':'y', 'texts':'xy'})
plt.legend(['naive', 'correlated'])
plt.xlabel('Sensitivity = 1 - FNR')
plt.ylabel('Efficiency')
plt.title('Tradeoff between test efficiency and sensitivity\n under prevalence = {}'.format(prev))
plt.grid(True, ls=':')
plt.savefig('../figs/experiment_2/pareto_plots/pareto_for_prev_{}.pdf'.format(prev), format='pdf', dpi=600, bbox_inches='tight')
plt.close()
return
def generate_heatmap_plots():
dir = '../results/experiment_2/pareto_analysis_2000/'
aggregate_results = {}
for filename in os.listdir(dir):
if filename == ".DS_Store":
continue
parts = re.split('=|[.](?!\d)|_', filename)
prev = float(parts[2])
pool_size = int(parts[4])
filedir = os.path.join(dir, filename)
with open(filedir) as f:
results = np.loadtxt(f)
avgs = np.mean(results, axis=0)
aggregate_results[(prev, pool_size)] = avgs
df_agg = pd.DataFrame.from_dict(aggregate_results, orient='index', columns=['fnr (naive)', 'fnr (correlated)', 'eff (naive)', 'eff (correlated)'])
df_agg.index = pd.MultiIndex.from_tuples(df_agg.index, names=['prevalence', 'pool size'])
df_agg = df_agg.reset_index()
df_agg = df_agg.sort_values(by=['prevalence', 'pool size'])
df_agg['sn (naive)'] = 1 - df_agg['fnr (naive)']
df_agg['sn (correlated)'] = 1 - df_agg['fnr (correlated)']
df_agg['sn diff'] = (df_agg['sn (correlated)'] - df_agg['sn (naive)']) * 100
df_agg['eff diff'] = df_agg['eff (correlated)'] - df_agg['eff (naive)']
fig, [ax0, ax1] = plt.subplots(1, 2, figsize=(8, 4))
table_sn = pd.pivot_table(df_agg, values='sn diff', index=['prevalence'], columns=['pool size'])
# print(table_sn)
heatmap = ax0.pcolor(table_sn, cmap=cm.BuPu)
ax0.set_aspect('equal')
ax0.set_yticks(np.arange(0.5, len(table_sn.index), 1))
ax0.set_yticklabels(table_sn.index)
ax0.set_xticks(np.arange(0.5, len(table_sn.columns), 1))
ax0.set_xticklabels(table_sn.columns)
ax0.set_xlabel('pool size')
ax0.set_ylabel('prevalence')
ax0.set_title('Difference in sensitivity (%)')
fig.colorbar(heatmap, ax=ax0, orientation="horizontal", label="(%)")
textcolors = ["k", "w"]
threshold = 0.049 * 100
for i, prev in enumerate(table_sn.index):
for j, pool_size in enumerate(table_sn.columns):
text = ax0.text(j+0.5, i+0.5, "{:.1f}".format(table_sn.iloc[i,j]),
ha="center", va="center", color=textcolors[table_sn.iloc[i, j] > threshold], size=7)
table_eff = pd.pivot_table(df_agg, values='eff diff', index=['prevalence'], columns=['pool size'])
heatmap = ax1.pcolor(table_eff, cmap=cm.YlGn)
ax1.set_aspect('equal')
ax1.set_yticks(np.arange(0.5, len(table_eff.index), 1))
ax1.set_yticklabels(table_eff.index)
ax1.set_xticks(np.arange(0.5, len(table_eff.columns), 1))
ax1.set_xticklabels(table_eff.columns)
ax1.set_xlabel('pool size')
ax1.set_ylabel('prevalence')
ax1.set_title('Difference in efficiency')
fig.colorbar(heatmap, ax=ax1, orientation="horizontal")
textcolors = ["k", "w"]
threshold = 0.675
for i, prev in enumerate(table_eff.index):
for j, pool_size in enumerate(table_eff.columns):
text = ax1.text(j+0.5, i+0.5, "{:.2f}".format(table_eff.iloc[i,j]).replace("0.", "."), \
ha="center", va="center", color=textcolors[table_eff.iloc[i, j] > threshold], size=7)
fig.tight_layout()
fig.savefig('../figs/experiment_2/pareto_plots/heapmap_for_fnr_and_eff_2000.pdf', format='pdf', dpi=600, bbox_inches='tight')
plt.clf()
return
def generate_test_consumption_results():
dir = '../results/experiment_2/pareto_analysis_2000/'
aggregate_results = {}
for filename in os.listdir(dir):
if filename == ".DS_Store":
continue
parts = re.split('=|[.](?!\d)|_', filename)
prev = float(parts[2])
pool_size = int(parts[4])
filedir = os.path.join(dir, filename)
with open(filedir) as f:
results = np.loadtxt(f)
avgs = np.mean(results, axis=0)
aggregate_results[(prev, pool_size)] = avgs
df_agg = pd.DataFrame.from_dict(aggregate_results, orient='index', columns=['fnr (naive)', 'fnr (correlated)', 'eff (naive)', 'eff (correlated)'])
df_agg.index = pd.MultiIndex.from_tuples(df_agg.index, names=['prevalence', 'pool size'])
df_agg = df_agg.reset_index()
df_agg = df_agg.sort_values(by=['prevalence', 'pool size'])
df_agg['sn (naive)'] = 1 - df_agg['fnr (naive)']
df_agg['sn (correlated)'] = 1 - df_agg['fnr (correlated)']
df_results = pd.DataFrame(columns=['prevalence', 'opt pool size (naive)', 'opt sn * eff (naive)',
'opt pool size (correlated)', 'opt sn * eff (correlated)', 'tests needed reduction'])
for prev in df_agg['prevalence'].unique():
df = df_agg[df_agg['prevalence'] == prev].reset_index()
df['sn*eff (naive)'] = df['sn (naive)'] * df['eff (naive)']
df['sn*eff (correlated)'] = df['sn (correlated)'] * df['eff (correlated)']
opt_pool_size_naive = df['pool size'].iloc[df['sn*eff (naive)'].idxmax()]
opt_sn_eff_prod_naive = df['sn*eff (naive)'].max()
opt_pool_size_corr = df['pool size'].iloc[df['sn*eff (correlated)'].idxmax()]
opt_sn_eff_prod_corr = df['sn*eff (correlated)'].max()
if prev == 0.01:
print('naive:', df[['prevalence', 'pool size', 'sn (naive)', 'eff (naive)']]\
[(df['pool size'] == opt_pool_size_naive)])
print('correlated: ', df[['prevalence', 'pool size', 'sn (correlated)', 'eff (correlated)']]\
[(df['pool size'] == opt_pool_size_corr)])
test_needed_reduction = 1 - opt_sn_eff_prod_naive / opt_sn_eff_prod_corr
# (1 / naive - 1 / corr) / (1 / corr) = corr / naive - 1 # increase when using NP
# (1 / naive - 1 / corr) / (1 / naive) = 1 - naive/corr # reduction when using CP
results = np.array([prev, opt_pool_size_naive,
opt_sn_eff_prod_naive, opt_pool_size_corr, opt_sn_eff_prod_corr, test_needed_reduction]).round(3)
df_results = df_results.append(dict(zip(df_results.columns, results)), ignore_index=True)
df_results.to_csv('../results/experiment_2/opt_pool_size_test_reduction_2000.csv', index=False)
return
def generate_bound_in_theorem_2_results(n_iters=1000000, n_resamples=10000):
filename = f'../results/PCR_tests/bounds_in_theorem_2_alternative_{n_iters}.csv'
dir = f'../results/PCR_tests/bound_analysis_{n_iters}'
df = | pd.read_csv(filename) | pandas.read_csv |
import os
import sys
import xarray as xr
import numpy as np
import pandas as pd
from datetime import datetime
from dateutil.relativedelta import relativedelta
pkg_dir = os.path.join(os.path.dirname(__file__),'..')
sys.path.append(pkg_dir)
from silverpieces.functions import *
def fill_time_index(nd_array):
td = nd_array.shape[0]
for i in range(td):
nd_array[i,:,:] = i
def fill_year(nd_array):
start_time = datetime(2001,1,1)
td = nd_array.shape[0]
for i in range(td):
nd_array[i,:,:] = (start_time + relativedelta(days=i)).year - start_time.year
def create_daily_sp_cube(start_time, end_time, nx=2, ny=3, fun_fill=fill_time_index):
start_time = pd.to_datetime(start_time)
end_time = pd.to_datetime(end_time)
tdim = pd.date_range(start=start_time, end=end_time, freq='D')
xdim = np.arange(0, nx * 0.5 - 1e-2, 0.5)
ydim = np.arange(0.25, 0.25 + ny * 0.5 - 1e-2, 0.5)
x = np.empty([len(tdim), ny, nx])
fun_fill(x)
y = xr.DataArray(x,
coords=[tdim,ydim,xdim],
dims=['time', 'lat', 'lon'],
name='test_daily_data')
return y
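def test_create_daily_sp_cube_shape():
    # Added illustrative check (assumes the non-leap year 2001, i.e. 365 days):
    y = create_daily_sp_cube('2001-01-01', '2001-12-31', nx=2, ny=3)
    assert y.shape == (365, 3, 2)
    assert y.dims == ('time', 'lat', 'lon')
    # fill_time_index writes each day's index into every cell of that day's slice
    assert float(y[10, 0, 0]) == 10.0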
def test_num_year_detection():
assert max_shifting_years('2001-01-01', '2003-12-31', '2001-01-01', '2001-12-31') == 2
assert max_shifting_years('2001-01-01', '2003-12-31', '2001-01-01', '2001-10-31') == 2
assert max_shifting_years('2001-01-01', '2003-12-31', '2001-03-01', '2001-12-31') == 2
#
assert max_shifting_years('2001-01-01', '2003-12-31', '2001-01-01', '2002-12-31') == 1
assert max_shifting_years('2001-01-01', '2003-12-30', '2001-01-01', '2002-12-31') == 0
# If the windows is less than a calendar year, it should still find the right max shift
assert max_shifting_years('2007-01-01', '2018-12-31', '2001-01-01', '2001-12-31') == 11
# 2016 a leap year...
assert max_shifting_years('2007-01-01', '2018-12-31', '2016-01-01', '2016-12-31') == 11
def test_periods_stat_yearly_stats():
start_time = pd.to_datetime('2001-01-01')
end_time = pd.to_datetime('2002-12-31')
x = create_daily_sp_cube('2001-01-01', '2009-12-31', nx=2, ny=3, fun_fill=fill_year)
s = SpatialTemporalDataArrayStat()
y = s.periods_stat_yearly(x, '2001-01-01', '2002-12-31')
assert len(y.time) == (9 - 2 + 1)
tdim = y[s.time_dimname].values
assert pd.to_datetime(tdim[0] ) == end_time
assert pd.to_datetime(tdim[-1]) == | pd.to_datetime('2009-12-31') | pandas.to_datetime |
# coding: utf-8
# In[2]:
get_ipython().magic('matplotlib inline')
import matplotlib.pyplot as plt
from keras.layers import Bidirectional, Input, LSTM, Dense, Activation, Conv1D, Flatten, Embedding, MaxPooling1D, Dropout
#from keras.layers.embeddings import Embedding
from keras.preprocessing.sequence import pad_sequences
from keras import optimizers
from gensim.models import Word2Vec
from keras.models import Sequential, Model
import pandas as pd
import numpy as np
from keras.preprocessing.text import Tokenizer, text_to_word_sequence
from sklearn.utils import shuffle
import pickle
from sklearn.model_selection import train_test_split
from autocorrect import spell
import spacy
from spacy.gold import GoldParse
nlp = spacy.load('en')
import re
from sklearn.utils import shuffle
import keras
# In[3]:
df = pd.read_csv('train.csv')
# In[4]:
pred_cols = ['toxic','severe_toxic','obscene','threat','insult','identity_hate']
# In[5]:
df['total_classes'] = df['toxic']+df['severe_toxic']+df['obscene']+df['threat']+df['insult']+df['identity_hate']
# In[6]:
df['comment_text'] = df['comment_text'].apply(lambda x : x.replace("'", "").replace('"',''))
# In[7]:
def correct_spelling(text):
words = text_to_word_sequence(text)
#print (words)
words = [spell(w) for w in words]
return " ".join(words)
# In[8]:
#df['comment_text'] = df['comment_text'].apply(lambda x : correct_spelling(x))
# In[9]:
df['comment_text'] = df['comment_text'].apply(lambda x: re.sub('[0-9]','',x))
# In[10]:
def replace_unknown_words_with_UNK(sentence):
words = text_to_word_sequence(sentence)
words = [get_word(w) for w in words]
return " ".join(words)
# In[11]:
def get_word(w):
if w in tokenizer.word_index:
return w
else:
return "unk"
# In[12]:
def train_tokenizer(texts):
tokenizer = Tokenizer()
sent_list = texts
tokenizer.fit_on_texts(sent_list)
return tokenizer
# In[13]:
def load_glove_embedding(glove_path):
word2emb = {}
with open(glove_path, "rb") as fglove:
for line in fglove:
cols = line.strip().split()
word = cols[0]
embedding = np.array(cols[1:], dtype="float32")
word2emb[word] = embedding
return word2emb
# In[14]:
def generate_word2vec(comments):
sents = [text_to_word_sequence(s) for s in comments]
vector = Word2Vec(sents, size=100, iter=50, min_count=1)
return vector
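# Illustrative note (comment only, not executed): generate_word2vec trains a
# 100-dimensional Word2Vec model on the tokenized comments, so that e.g.
#   model = generate_word2vec(["the cat sat", "the dog sat"])
#   model.wv['cat']  # -> numpy array of length 100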
# In[37]:
comment_list = df['comment_text'].tolist()
glove_file = 'glove.840B.300d.txt'
#glove_file = 'glove.6B.100d.txt'
emb_matrix = load_glove_embedding(glove_file)
#emb_matrix = generate_word2vec(comment_list)
max_len = 300
comment_list.append("unk")
tokenizer = train_tokenizer(comment_list)
n_classes = 1
# ### Replacing all the unknown words with UNK. This will have no impact on training as all the words are known
# In[38]:
df['comment_text'] = df['comment_text'].apply(lambda x : replace_unknown_words_with_UNK(x))
# In[17]:
print ("The vocabulary size is: {0}".format(len(tokenizer.word_index)))
print (tokenizer.texts_to_sequences([replace_unknown_words_with_UNK("DFLSDKJFLS ADFSDF was Infosys CEO")]))
# In[18]:
def clean_up(dfin):
dfin['comment_text'] = dfin['comment_text'].apply(lambda x : str(x).replace("'", "").replace('"',''))
dfin['comment_text'] = dfin['comment_text'].apply(lambda x: re.sub('[0-9]','',x))
#dfin['comment_text'] = dfin['comment_text'].apply(lambda x : replace_unknown_words_with_UNK(x))
return dfin
# In[19]:
class_count = []
for col in pred_cols:
class_count.append((col,len(df[df[col]==1])))
print (class_count)
# In[20]:
def get_stratified_train(df, oversample=None):
df_all_toxic = df[np.logical_and(df['toxic'] ==1 , df['total_classes'] ==1)]
df_all_severe_toxic = df[np.logical_and(df['severe_toxic'] ==1 , df['total_classes'] <=6)]
df_all_obscene = df[np.logical_and(df['obscene'] ==1 , df['total_classes'] <=6)]
df_all_threat = df[np.logical_and(df['threat'] ==1 , df['total_classes'] <=6)]
df_all_insult = df[np.logical_and(df['insult'] ==1 , df['total_classes'] <=6)]
df_all_identity_hate = df[np.logical_and(df['identity_hate'] ==1 , df['total_classes'] <=6)]
df_all_rest =df[df['total_classes'] ==0]
print("Counts:- toxic:{0}, severe_toxic:{1}, obscene:{2}, threat:{3}, insult:{4}, identity_hate:{5}, rest:{6}".format(len(df_all_toxic),len(df_all_severe_toxic),len(df_all_obscene),len(df_all_threat),len(df_all_insult),len(df_all_identity_hate), len(df_all_rest)))
X_train_toxic, X_test_toxic = train_test_split(df_all_toxic, test_size=0.10, random_state=42)
X_train_severe_toxic, X_test_severe_toxic = train_test_split(df_all_severe_toxic, test_size=0.1, random_state=42)
X_train_obscene, X_test_obscene = train_test_split(df_all_obscene, test_size=0.05, random_state=42)
X_train_threat, X_test_threat = train_test_split(df_all_threat, test_size=0.05, random_state=42)
X_train_insult, X_test_insult = train_test_split(df_all_insult, test_size=0.10, random_state=42)
X_train_identity_hate, X_test_identity_hate = train_test_split(df_all_identity_hate, test_size=0.1, random_state=42)
X_train_rest, X_test_rest = train_test_split(df_all_rest, test_size=0.10, random_state=42)
print("Train Counts:- toxic:{0}, severe_toxic:{1}, obscene:{2}, threat:{3}, insult:{4}, identity_hate:{5}, rest:{6}".format(len(X_train_toxic),len(X_train_severe_toxic),len(X_train_obscene),len(X_train_threat),len(X_train_insult),len(X_train_identity_hate), len(X_train_rest)))
print("Test Counts:- toxic:{0}, severe_toxic:{1}, obscene:{2}, threat:{3}, insult:{4}, identity_hate:{5}, rest:{6}".format(len(X_test_toxic),len(X_test_severe_toxic),len(X_test_obscene),len(X_test_threat),len(X_test_insult),len(X_test_identity_hate), len(X_test_rest)))
X_train = | pd.concat([X_train_toxic, X_train_severe_toxic, X_train_obscene, X_train_threat, X_train_insult, X_train_identity_hate, X_train_rest]) | pandas.concat |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Functions to reproduce the post-processing of data on text charts.
Some text-based charts (pivot tables and t-test table) perform
post-processing of the data in Javascript. When sending the data
to users in reports we want to show the same data they would see
on Explore.
In order to do that, we reproduce the post-processing in Python
for these chart types.
"""
from typing import Any, Callable, Dict, Optional, Union
import pandas as pd
from superset.utils.core import DTTM_ALIAS, extract_dataframe_dtypes, get_metric_name
def sql_like_sum(series: pd.Series) -> pd.Series:
"""
A SUM aggregation function that mimics the behavior from SQL.
"""
return series.sum(min_count=1)
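# Why min_count=1 matters (illustrative): pandas' default sum() of an all-NaN
# series returns 0, whereas SQL's SUM returns NULL. With min_count=1 the result
# is NaN, matching SQL:
#   pd.Series([None, None], dtype="float64").sum()             -> 0.0
#   pd.Series([None, None], dtype="float64").sum(min_count=1)  -> nan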
def pivot_table(
result: Dict[Any, Any], form_data: Optional[Dict[str, Any]] = None
) -> Dict[Any, Any]:
"""
Pivot table.
"""
for query in result["queries"]:
data = query["data"]
df = pd.DataFrame(data)
form_data = form_data or {}
if form_data.get("granularity") == "all" and DTTM_ALIAS in df:
del df[DTTM_ALIAS]
metrics = [get_metric_name(m) for m in form_data["metrics"]]
aggfuncs: Dict[str, Union[str, Callable[[Any], Any]]] = {}
for metric in metrics:
aggfunc = form_data.get("pandas_aggfunc") or "sum"
if pd.api.types.is_numeric_dtype(df[metric]):
if aggfunc == "sum":
aggfunc = sql_like_sum
elif aggfunc not in {"min", "max"}:
aggfunc = "max"
aggfuncs[metric] = aggfunc
groupby = form_data.get("groupby") or []
columns = form_data.get("columns") or []
if form_data.get("transpose_pivot"):
groupby, columns = columns, groupby
df = df.pivot_table(
index=groupby,
columns=columns,
values=metrics,
aggfunc=aggfuncs,
margins=form_data.get("pivot_margins"),
)
# Re-order the columns adhering to the metric ordering.
df = df[metrics]
# Display metrics side by side with each column
if form_data.get("combine_metric"):
df = df.stack(0).unstack().reindex(level=-1, columns=metrics)
# flatten column names
df.columns = [" ".join(column) for column in df.columns]
# re-arrange data into a list of dicts
data = []
for i in df.index:
row = {col: df[col][i] for col in df.columns}
row[df.index.name] = i
data.append(row)
query["data"] = data
query["colnames"] = list(df.columns)
query["coltypes"] = extract_dataframe_dtypes(df)
query["rowcount"] = len(df.index)
return result
def list_unique_values(series: pd.Series) -> str:
"""
List unique values in a series.
"""
return ", ".join(set(str(v) for v in | pd.Series.unique(series) | pandas.Series.unique |
import math
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import os
from pandas.core.frame import DataFrame
from torch.utils.data import Dataset, DataLoader
import torch
import pickle
import datetime
class data_loader(Dataset):
def __init__(self, df_feature, df_label, df_label_reg, t=None):
assert len(df_feature) == len(df_label)
assert len(df_feature) == len(df_label_reg)
# df_feature = df_feature.reshape(df_feature.shape[0], df_feature.shape[1] // 6, df_feature.shape[2] * 6)
self.df_feature=df_feature
self.df_label=df_label
self.df_label_reg = df_label_reg
self.T=t
self.df_feature=torch.tensor(
self.df_feature, dtype=torch.float32)
self.df_label=torch.tensor(
self.df_label, dtype=torch.float32)
self.df_label_reg=torch.tensor(
self.df_label_reg, dtype=torch.float32)
def __getitem__(self, index):
sample, target, label_reg =self.df_feature[index], self.df_label[index], self.df_label_reg[index]
if self.T:
return self.T(sample), target
else:
return sample, target, label_reg
def __len__(self):
return len(self.df_feature)
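# Usage sketch (array names are assumptions based on the constructor above):
#   ds = data_loader(feat_array, label_array, label_reg_array)
#   loader = DataLoader(ds, batch_size=32, shuffle=True)
#   for x, y, y_reg in loader:
#       ...  # each batch is a float32 tensor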
def create_dataset(df, station, start_date, end_date, mean=None, std=None):
data=df[station]
feat, label, label_reg =data[0], data[1], data[2]
referece_start_time=datetime.datetime(2013, 3, 1, 0, 0)
referece_end_time=datetime.datetime(2017, 2, 28, 0, 0)
assert (pd.to_datetime(start_date) - referece_start_time).days >= 0
assert (pd.to_datetime(end_date) - referece_end_time).days <= 0
assert (pd.to_datetime(end_date) - pd.to_datetime(start_date)).days >= 0
index_start=(pd.to_datetime(start_date) - referece_start_time).days
index_end=(pd.to_datetime(end_date) - referece_start_time).days
feat=feat[index_start: index_end + 1]
label=label[index_start: index_end + 1]
label_reg=label_reg[index_start: index_end + 1]
# ori_shape_1, ori_shape_2=feat.shape[1], feat.shape[2]
# feat=feat.reshape(-1, feat.shape[2])
# feat=(feat - mean) / std
# feat=feat.reshape(-1, ori_shape_1, ori_shape_2)
return data_loader(feat, label, label_reg)
def create_dataset_shallow(df, station, start_date, end_date, mean=None, std=None):
data=df[station]
feat, label, label_reg =data[0], data[1], data[2]
referece_start_time=datetime.datetime(2013, 3, 1, 0, 0)
referece_end_time=datetime.datetime(2017, 2, 28, 0, 0)
assert (pd.to_datetime(start_date) - referece_start_time).days >= 0
assert (pd.to_datetime(end_date) - referece_end_time).days <= 0
assert (pd.to_datetime(end_date) - pd.to_datetime(start_date)).days >= 0
index_start=(pd.to_datetime(start_date) - referece_start_time).days
index_end=(pd.to_datetime(end_date) - referece_start_time).days
feat=feat[index_start: index_end + 1]
label=label[index_start: index_end + 1]
label_reg=label_reg[index_start: index_end + 1]
# ori_shape_1, ori_shape_2=feat.shape[1], feat.shape[2]
# feat=feat.reshape(-1, feat.shape[2])
# feat=(feat - mean) / std
# feat=feat.reshape(-1, ori_shape_1, ori_shape_2)
return feat, label_reg
def get_dataset_statistic(df, station, start_date, end_date):
data=df[station]
feat, label =data[0], data[1]
referece_start_time=datetime.datetime(2013, 3, 1, 0, 0)
referece_end_time=datetime.datetime(2017, 2, 28, 0, 0)
assert (pd.to_datetime(start_date) - referece_start_time).days >= 0
assert ( | pd.to_datetime(end_date) | pandas.to_datetime |
import pandas as pd
from src.utility.file_utility import get_directory_files, create_directory, copy_file
from src.utility.system_utility import progress_bar
from src.utility.image_utility import load_image, crop_roi, save_image
from sklearn.model_selection import train_test_split
def get_labels(n_labels, as_string=True):
if as_string:
return ['0000' + str(i) if i < 10 else '000' + str(i) for i in range(n_labels)]
else:
return [int(i) for i in range(n_labels)]
def get_image_label(label_code, labels):
return [1 if label_code == i else 0 for i in labels]
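def _demo_label_encoding():
    # Sketch (not part of the original module): shows the zero-padded class
    # codes produced by get_labels and the one-hot vector from get_image_label.
    labels = get_labels(3)                     # ['00000', '00001', '00002']
    print(get_image_label('00001', labels))    # [0, 1, 0]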
def create_traing_data_table(folder_path, output_path, img_ext='ppm'):
directories = get_directory_files(folder_path)
directories.sort()
datatable = pd.DataFrame(columns=['image_path', 'label', 'roi_x1', 'roi_y1', 'roi_x2', 'roi_y2'])
total_count = 0
for label in directories:
current_directory = label
path_label_folder = folder_path + '/' + current_directory
images = [image for image in get_directory_files(path_label_folder) if img_ext in image]
images.sort()
category_df = pd.read_csv(path_label_folder + '/GT-' + current_directory + '.csv', sep=';')
count = 0
for img in images:
img_path = path_label_folder + '/' + img
category_df_row = category_df.iloc[count]
datatable.loc[total_count] = [img_path, label, category_df_row['Roi.X1'], category_df_row['Roi.Y1'],
category_df_row['Roi.X2'], category_df_row['Roi.Y2']]
count += 1
total_count += 1
progress_bar(count, len(images), 'Processing label: ' + label + ' with ' + str(len(images)) + ' images')
print()
datatable.to_csv(output_path, index=False, header=True)
def split_train_data(train_out_folder, validation_out_folder, dataset_path, validation_size=0.25, labels=43, roi_folder_suffix='_roi'):
dataframe = pd.read_csv(dataset_path)
x_train, x_valid, y_train, y_valid = train_test_split(dataframe['image_path'].values, dataframe['label'].values,
test_size=validation_size, shuffle=True)
for i in range(labels):
if i < 10:
folder = '0000' + str(i)
else:
folder = '000' + str(i)
create_directory(train_out_folder + '/' + folder)
create_directory(validation_out_folder + '/' + folder)
    # Copy the original images into the per-class train/validation folders
copy_images(x_train, y_train, train_out_folder)
print()
copy_images(x_valid, y_valid, validation_out_folder)
    # Save ROI-only crops of the images
save_images_roi(x_train, y_train, train_out_folder + roi_folder_suffix, dataframe)
print()
save_images_roi(x_valid, y_valid, validation_out_folder + roi_folder_suffix, dataframe)
def copy_images(x, y, output_path):
for i in range(x.shape[0]):
label = y[i]
if label < 10:
folder = '0000' + str(label)
else:
folder = '000' + str(label)
file_name = x[i].split('/')[-1]
copy_file(x[i], output_path + '/' + folder + '/' + file_name)
progress_bar(i, x.shape[0], 'Copying ' + str(x.shape[0]) + ' images in: ' + output_path)
def prepare_test_data(starting_folder, output_folder, data_frame_path, sep=';', label_col='ClassId', labels=43, roi_folder_suffix='_roi'):
files = get_directory_files(starting_folder)
files.sort()
data_frame = | pd.read_csv(data_frame_path, sep=sep) | pandas.read_csv |
from __future__ import print_function
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas import (Series, Index, Int64Index, Timestamp, Period,
DatetimeIndex, PeriodIndex, TimedeltaIndex,
Timedelta, timedelta_range, date_range, Float64Index,
_np_version_under1p10)
import pandas.tslib as tslib
import pandas.tseries.period as period
import pandas.util.testing as tm
from pandas.tests.test_base import Ops
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setUp(self):
super(TestDatetimeIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['date', 'time', 'microsecond', 'nanosecond',
'is_month_start', 'is_month_end',
'is_quarter_start',
'is_quarter_end', 'is_year_start',
'is_year_end', 'weekday_name'],
lambda x: isinstance(x, DatetimeIndex))
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
self.assertRaises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
self.assertEqual(s.year, 2000)
self.assertEqual(s.month, 1)
self.assertEqual(s.day, 10)
self.assertRaises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timestamp('2011-01-01', tz=tz))
self.assertEqual(idx.max(), Timestamp('2011-01-03', tz=tz))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(dr),
Timestamp('2016-01-15 00:00:00', freq='D'))
self.assertEqual(np.max(dr),
Timestamp('2016-01-20 00:00:00', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, dr, out=0)
self.assertEqual(np.argmin(dr), 0)
self.assertEqual(np.argmax(dr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assertRaisesRegexp(ValueError, msg):
rng.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, rng.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
self.assertIsNone(res.freq)
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assertRaisesRegexp(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
"'2011-01-01 10:00:00+00:00', 'NaT'], "
"dtype='datetime64[ns, UTC]', freq=None)""")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'minute', 'second', 'millisecond',
'microsecond']):
for tz in self.tz:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
self.assertEqual(idx.resolution, expected)
def test_union(self):
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng + 1
expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add a datelike to a DatetimeIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx + Timestamp('2011-01-01')
with tm.assertRaisesRegexp(TypeError, msg):
Timestamp('2011-01-01') + idx
def test_add_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now raises
# TypeError (GH14164)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
with tm.assertRaises(TypeError):
dti + dti
with tm.assertRaises(TypeError):
dti_tz + dti_tz
with tm.assertRaises(TypeError):
dti_tz + dti
with tm.assertRaises(TypeError):
dti + dti_tz
def test_difference(self):
for tz in self.tz:
# diff
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_diff = rng.difference(other)
tm.assert_index_equal(result_diff, expected)
def test_sub_isub(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng - 1
expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_sub_dti_dti(self):
        # previously performed setop (deprecated in 0.16.0), now changed to
        # return subtraction -> TimedeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with tm.assertRaises(TypeError):
dti_tz - dti
with tm.assertRaises(TypeError):
dti - dti_tz
with tm.assertRaises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with tm.assertRaises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'D']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_comp_nat(self):
left = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')])
right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
for tz in self.tz:
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)),
tz=tz)
exp_idx = pd.date_range('2011-01-01 18:00', freq='-1H', periods=10,
tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.date_range('2011-01-01 09:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(idx.unique(), expected)
idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', pd.NaT], tz=tz)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
tz=tz)
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00',
pd.NaT], tz=tz)
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False),
expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(DatetimeIndex,
([0, 1, 0], [0, 0, -1], [0, -1, -1],
['2015', '2015', '2016'], ['2015', '2015', '2014'])):
tm.assertIn(idx[0], idx)
def test_order(self):
# with freq
idx1 = DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D', name='idx')
idx2 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H',
tz='Asia/Tokyo', name='tzidx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
# without freq
for tz in self.tz:
idx1 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx1')
exp1 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx1')
idx2 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx2')
exp2 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx2')
idx3 = DatetimeIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], tz=tz, name='idx3')
exp3 = DatetimeIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], tz=tz, name='idx3')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx[0]
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx[0:5]
expected = pd.date_range('2011-01-01', '2011-01-05', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.date_range('2011-01-01', '2011-01-09', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.date_range('2011-01-12', '2011-01-24', freq='3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-03',
'2011-01-02', '2011-01-01'],
freq='-1D', tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx.take([0])
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx.take([0, 1, 2])
expected = pd.date_range('2011-01-01', '2011-01-03', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.date_range('2011-01-01', '2011-01-05', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.date_range('2011-01-08', '2011-01-02', freq='-3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = DatetimeIndex(['2011-01-04', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = DatetimeIndex(['2011-01-29', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['A', '2A', '-2A', 'Q', '-1Q', 'M', '-1M', 'D', '3D',
'-3D', 'W', '-1W', 'H', '2H', '-2H', 'T', '2T', 'S',
'-3S']:
idx = pd.date_range('2011-01-01 09:00:00', freq=freq, periods=10)
result = pd.DatetimeIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.date_range('2011-01-01', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.DatetimeIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
for tz in self.tz:
idx = pd.DatetimeIndex([], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
            idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-01 11:00',
                                    '2011-01-01 12:00'], name='xxx', tz=tz)
            tm.assert_index_equal(idx.shift(0, freq='H'), idx)
            exp = pd.DatetimeIndex(['2011-01-01 13:00', '2011-01-01 14:00',
                                    '2011-01-01 15:00'], name='xxx', tz=tz)
            tm.assert_index_equal(idx.shift(3, freq='H'), exp)
            exp = pd.DatetimeIndex(['2011-01-01 07:00', '2011-01-01 08:00',
                                    '2011-01-01 09:00'], name='xxx', tz=tz)
            tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
def test_nat(self):
self.assertIs(pd.DatetimeIndex._na_value, pd.NaT)
self.assertIs(pd.DatetimeIndex([])._na_value, pd.NaT)
for tz in [None, 'US/Eastern', 'UTC']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.DatetimeIndex(['2011-01-01', 'NaT'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'],
tz='US/Pacific')
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
# same internal, different tz
idx3 = pd.DatetimeIndex._simple_new(idx.asi8, tz='US/Pacific')
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
self.assertFalse(idx.equals(idx3))
self.assertFalse(idx.equals(idx3.copy()))
self.assertFalse(idx.equals(idx3.asobject))
self.assertFalse(idx.asobject.equals(idx3))
self.assertFalse(idx.equals(list(idx3)))
self.assertFalse(idx.equals(pd.Series(idx3)))
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
            self.assertEqual(idx.min(), Timedelta('1 days'))
            self.assertEqual(idx.max(), Timedelta('3 days'))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
idx = TimedeltaIndex(['1 day', '2 day'])
msg = "cannot subtract a datelike from a TimedeltaIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx - Timestamp('2011-01-01')
result = Timestamp('2011-01-01') + idx
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
def test_ops_compat(self):
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
rng = timedelta_range('1 days', '10 days', name='foo')
# multiply
for offset in offsets:
self.assertRaises(TypeError, lambda: rng * offset)
# divide
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected, exact=False)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected)
        # don't allow division by NaT (maybe we could allow it in the future)
self.assertRaises(TypeError, lambda: rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
self.assertRaises(TypeError, lambda: tdi - dt)
self.assertRaises(TypeError, lambda: tdi - dti)
self.assertRaises(TypeError, lambda: td - dt)
self.assertRaises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
self.assertEqual(result, expected)
self.assertIsInstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
self.assertRaises(TypeError, lambda: dt_tz - ts)
self.assertRaises(TypeError, lambda: dt_tz - dt)
self.assertRaises(TypeError, lambda: dt_tz - ts_tz2)
self.assertRaises(TypeError, lambda: dt - dt_tz)
self.assertRaises(TypeError, lambda: ts - dt_tz)
self.assertRaises(TypeError, lambda: ts_tz2 - ts)
self.assertRaises(TypeError, lambda: ts_tz2 - dt)
self.assertRaises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
self.assertRaises(TypeError, lambda: dti - ts_tz)
self.assertRaises(TypeError, lambda: dti_tz - ts)
self.assertRaises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'H']:
idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
self.assertRaises(ValueError, lambda: tdi + dti[0:1])
self.assertRaises(ValueError, lambda: tdi[0:1] + dti)
# random indexes
self.assertRaises(TypeError, lambda: tdi + Int64Index([1, 2, 3]))
# this is a union!
# self.assertRaises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
self.assertEqual(result, expected)
result = td + dt
expected = Timestamp('20130102')
self.assertEqual(result, expected)
def test_comp_nat(self):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = timedelta_range('1 days 09:00:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1)))
exp_idx = timedelta_range('1 days 18:00:00', freq='-1H', periods=10)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = timedelta_range('1 days 09:00:00', freq='H', periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00',
'1 days 09:00:00', '1 days 08:00:00',
'1 days 08:00:00', pd.NaT])
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00'])
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00',
pd.NaT])
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(TimedeltaIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
['00:01:00', '00:01:00', '00:02:00'],
['00:01:00', '00:01:00', '00:00:01'])):
tm.assertIn(idx[0], idx)
def test_unknown_attribute(self):
# GH 9680
tdi = pd.timedelta_range(start=0, periods=10, freq='1s')
ts = pd.Series(np.random.normal(size=10), index=tdi)
self.assertNotIn('foo', ts.__dict__.keys())
self.assertRaises(AttributeError, lambda: ts.foo)
def test_order(self):
# GH 10295
idx1 = TimedeltaIndex(['1 day', '2 day', '3 day'], freq='D',
name='idx')
idx2 = TimedeltaIndex(
['1 hour', '2 hour', '3 hour'], freq='H', name='idx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
idx1 = TimedeltaIndex(['1 hour', '3 hour', '5 hour',
'2 hour ', '1 hour'], name='idx1')
exp1 = TimedeltaIndex(['1 hour', '1 hour', '2 hour',
'3 hour', '5 hour'], name='idx1')
idx2 = TimedeltaIndex(['1 day', '3 day', '5 day',
'2 day', '1 day'], name='idx2')
# TODO(wesm): unused?
# exp2 = TimedeltaIndex(['1 day', '1 day', '2 day',
# '3 day', '5 day'], name='idx2')
# idx3 = TimedeltaIndex([pd.NaT, '3 minute', '5 minute',
# '2 minute', pd.NaT], name='idx3')
# exp3 = TimedeltaIndex([pd.NaT, pd.NaT, '2 minute', '3 minute',
# '5 minute'], name='idx3')
for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx[0]
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx[0:5]
expected = pd.timedelta_range('1 day', '5 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.timedelta_range('1 day', '9 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.timedelta_range('12 day', '24 day', freq='3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = TimedeltaIndex(['5 day', '4 day', '3 day',
'2 day', '1 day'],
freq='-1D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx.take([0])
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx.take([-1])
self.assertEqual(result, pd.Timedelta('31 day'))
result = idx.take([0, 1, 2])
expected = pd.timedelta_range('1 day', '3 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.timedelta_range('1 day', '5 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.timedelta_range('8 day', '2 day', freq='-3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = TimedeltaIndex(['29 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['D', '3D', '-3D', 'H', '2H', '-2H', 'T', '2T', 'S', '-3S'
]:
idx = pd.timedelta_range('1', freq=freq, periods=10)
result = pd.TimedeltaIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.timedelta_range('1', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.TimedeltaIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
idx = pd.TimedeltaIndex([], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx')
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
tm.assert_index_equal(idx.shift(0, freq='T'), idx)
exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'],
name='xxx')
tm.assert_index_equal(idx.shift(3, freq='T'), exp)
exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'],
name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='T'), exp)
def test_repeat(self):
index = pd.timedelta_range('1 days', periods=2, freq='D')
exp = pd.TimedeltaIndex(['1 days', '1 days', '2 days', '2 days'])
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = TimedeltaIndex(['1 days', 'NaT', '3 days'])
exp = TimedeltaIndex(['1 days', '1 days', '1 days',
'NaT', 'NaT', 'NaT',
'3 days', '3 days', '3 days'])
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_nat(self):
self.assertIs(pd.TimedeltaIndex._na_value, pd.NaT)
self.assertIs(pd.TimedeltaIndex([])._na_value, pd.NaT)
idx = pd.TimedeltaIndex(['1 days', '2 days'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.TimedeltaIndex(['1 days', 'NaT'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
idx = pd.TimedeltaIndex(['1 days', '2 days', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.TimedeltaIndex(['2 days', '1 days', 'NaT'])
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.asobject.equals(idx2.asobject))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
class TestPeriodIndexOps(Ops):
def setUp(self):
super(TestPeriodIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['qyear'],
lambda x: isinstance(x, PeriodIndex))
def test_asobject_tolist(self):
idx = pd.period_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [pd.Period('2013-01-31', freq='M'),
pd.Period('2013-02-28', freq='M'),
pd.Period('2013-03-31', freq='M'),
pd.Period('2013-04-30', freq='M')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = PeriodIndex(['2013-01-01', '2013-01-02', 'NaT',
'2013-01-04'], freq='D', name='idx')
expected_list = [pd.Period('2013-01-01', freq='D'),
pd.Period('2013-01-02', freq='D'),
pd.Period('NaT', freq='D'),
pd.Period('2013-01-04', freq='D')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
tm.assert_index_equal(result, expected)
for i in [0, 1, 3]:
self.assertEqual(result[i], expected[i])
self.assertIs(result[2], pd.NaT)
self.assertEqual(result.name, expected.name)
result_list = idx.tolist()
for i in [0, 1, 3]:
self.assertEqual(result_list[i], expected_list[i])
self.assertIs(result_list[2], pd.NaT)
def test_minmax(self):
# monotonic
idx1 = pd.PeriodIndex([pd.NaT, '2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.PeriodIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], freq='D')
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), pd.Period('2011-01-01', freq='D'))
self.assertEqual(idx.max(), pd.Period('2011-01-03', freq='D'))
self.assertEqual(idx1.argmin(), 1)
self.assertEqual(idx2.argmin(), 0)
self.assertEqual(idx1.argmax(), 3)
self.assertEqual(idx2.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = PeriodIndex([], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT, pd.NaT, pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
def test_numpy_minmax(self):
pr = pd.period_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(pr), Period('2016-01-15', freq='D'))
self.assertEqual(np.max(pr), Period('2016-01-20', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, pr, out=0)
self.assertEqual(np.argmin(pr), 0)
self.assertEqual(np.argmax(pr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, pr, out=0)
def test_representation(self):
# GH 7601
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
idx10 = PeriodIndex(['2011-01-01', '2011-02-01'], freq='3D')
exp1 = """PeriodIndex([], dtype='period[D]', freq='D')"""
exp2 = """PeriodIndex(['2011-01-01'], dtype='period[D]', freq='D')"""
exp3 = ("PeriodIndex(['2011-01-01', '2011-01-02'], dtype='period[D]', "
"freq='D')")
exp4 = ("PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='period[D]', freq='D')")
exp5 = ("PeriodIndex(['2011', '2012', '2013'], dtype='period[A-DEC]', "
"freq='A-DEC')")
exp6 = ("PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], "
"dtype='period[H]', freq='H')")
exp7 = ("PeriodIndex(['2013Q1'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp8 = ("PeriodIndex(['2013Q1', '2013Q2'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp9 = ("PeriodIndex(['2013Q1', '2013Q2', '2013Q3'], "
"dtype='period[Q-DEC]', freq='Q-DEC')")
exp10 = ("PeriodIndex(['2011-01-01', '2011-02-01'], "
"dtype='period[3D]', freq='3D')")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9, idx10],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9, exp10]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
# GH 10971
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """Series([], dtype: object)"""
exp2 = """0 2011-01-01
dtype: object"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: object"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: object"""
exp5 = """0 2011
1 2012
2 2013
dtype: object"""
exp6 = """0 2011-01-01 09:00
1 2012-02-01 10:00
2 NaT
dtype: object"""
exp7 = """0 2013Q1
dtype: object"""
exp8 = """0 2013Q1
1 2013Q2
dtype: object"""
exp9 = """0 2013Q1
1 2013Q2
2 2013Q3
dtype: object"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(
['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """PeriodIndex: 0 entries
Freq: D"""
exp2 = """PeriodIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """PeriodIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """PeriodIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = """PeriodIndex: 3 entries, 2011 to 2013
Freq: A-DEC"""
exp6 = """PeriodIndex: 3 entries, 2011-01-01 09:00 to NaT
Freq: H"""
exp7 = """PeriodIndex: 1 entries, 2013Q1 to 2013Q1
Freq: Q-DEC"""
exp8 = """PeriodIndex: 2 entries, 2013Q1 to 2013Q2
Freq: Q-DEC"""
exp9 = """PeriodIndex: 3 entries, 2013Q1 to 2013Q3
Freq: Q-DEC"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H',
'T', 'S', 'L', 'U'],
['day', 'day', 'day', 'day',
'hour', 'minute', 'second',
'millisecond', 'microsecond']):
idx = pd.period_range(start='2013-04-01', periods=30, freq=freq)
self.assertEqual(idx.resolution, expected)
def test_union(self):
# union
rng1 = pd.period_range('1/1/2000', freq='D', periods=5)
other1 = pd.period_range('1/6/2000', freq='D', periods=5)
expected1 = pd.period_range('1/1/2000', freq='D', periods=10)
rng2 = pd.period_range('1/1/2000', freq='D', periods=5)
other2 = pd.period_range('1/4/2000', freq='D', periods=5)
expected2 = pd.period_range('1/1/2000', freq='D', periods=8)
rng3 = pd.period_range('1/1/2000', freq='D', periods=5)
other3 = pd.PeriodIndex([], freq='D')
expected3 = pd.period_range('1/1/2000', freq='D', periods=5)
rng4 = pd.period_range('2000-01-01 09:00', freq='H', periods=5)
other4 = pd.period_range('2000-01-02 09:00', freq='H', periods=5)
expected4 = pd.PeriodIndex(['2000-01-01 09:00', '2000-01-01 10:00',
'2000-01-01 11:00', '2000-01-01 12:00',
'2000-01-01 13:00', '2000-01-02 09:00',
'2000-01-02 10:00', '2000-01-02 11:00',
'2000-01-02 12:00', '2000-01-02 13:00'],
freq='H')
rng5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05'], freq='T')
other5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:05',
'2000-01-01 09:08'],
freq='T')
expected5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05', '2000-01-01 09:08'],
freq='T')
rng6 = pd.period_range('2000-01-01', freq='M', periods=7)
other6 = pd.period_range('2000-04-01', freq='M', periods=7)
expected6 = pd.period_range('2000-01-01', freq='M', periods=10)
rng7 = pd.period_range('2003-01-01', freq='A', periods=5)
other7 = pd.period_range('1998-01-01', freq='A', periods=8)
expected7 = pd.period_range('1998-01-01', freq='A', periods=10)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3), (rng4, other4,
expected4),
(rng5, other5, expected5), (rng6, other6,
expected6),
(rng7, other7, expected7)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
# previously performed setop union, now raises TypeError (GH14164)
with tm.assertRaises(TypeError):
rng + other
with tm.assertRaises(TypeError):
rng += other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng + pd.offsets.YearEnd(5)
expected = pd.period_range('2019', '2029', freq='A')
tm.assert_index_equal(result, expected)
rng += pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng + pd.offsets.MonthEnd(5)
expected = pd.period_range('2014-06', '2017-05', freq='M')
tm.assert_index_equal(result, expected)
rng += pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h'),
Timedelta('72:00:00')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng + delta
expected = pd.period_range('2014-05-04', '2014-05-18', freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23), Timedelta('23:00:00')]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), pd.offsets.Minute(120),
timedelta(minutes=120), np.timedelta64(120, 'm'),
Timedelta(minutes=120)]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
result = rng + delta
expected = pd.period_range('2014-01-01 12:00', '2014-01-05 12:00',
freq='H')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's'), Timedelta(seconds=30)]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
result = rng + delta
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng += delta
# int
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng + 1
expected = pd.period_range('2000-01-01 10:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_difference(self):
# diff
rng1 = pd.period_range('1/1/2000', freq='D', periods=5)
other1 = pd.period_range('1/6/2000', freq='D', periods=5)
expected1 = pd.period_range('1/1/2000', freq='D', periods=5)
rng2 = pd.period_range('1/1/2000', freq='D', periods=5)
other2 = pd.period_range('1/4/2000', freq='D', periods=5)
expected2 = pd.period_range('1/1/2000', freq='D', periods=3)
rng3 = pd.period_range('1/1/2000', freq='D', periods=5)
other3 = pd.PeriodIndex([], freq='D')
expected3 = pd.period_range('1/1/2000', freq='D', periods=5)
rng4 = pd.period_range('2000-01-01 09:00', freq='H', periods=5)
other4 = pd.period_range('2000-01-02 09:00', freq='H', periods=5)
expected4 = rng4
rng5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05'], freq='T')
other5 = pd.PeriodIndex(
['2000-01-01 09:01', '2000-01-01 09:05'], freq='T')
expected5 = pd.PeriodIndex(['2000-01-01 09:03'], freq='T')
rng6 = pd.period_range('2000-01-01', freq='M', periods=7)
other6 = pd.period_range('2000-04-01', freq='M', periods=7)
expected6 = pd.period_range('2000-01-01', freq='M', periods=3)
rng7 = pd.period_range('2003-01-01', freq='A', periods=5)
other7 = pd.period_range('1998-01-01', freq='A', periods=8)
expected7 = pd.period_range('2006-01-01', freq='A', periods=2)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3),
(rng4, other4, expected4),
(rng5, other5, expected5),
(rng6, other6, expected6),
(rng7, other7, expected7), ]:
result_union = rng.difference(other)
tm.assert_index_equal(result_union, expected)
def test_sub_isub(self):
# previously performed setop, now raises TypeError (GH14164)
# TODO needs to wait on #13077 for decision on result type
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
with tm.assertRaises(TypeError):
rng - other
with tm.assertRaises(TypeError):
rng -= other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng - pd.offsets.YearEnd(5)
expected = pd.period_range('2009', '2019', freq='A')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
rng = pd.period_range('2014', '2024', freq='A')
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng - o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng - pd.offsets.MonthEnd(5)
expected = pd.period_range('2013-08', '2016-07', freq='M')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng - o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng - delta
expected = pd.period_range('2014-04-28', '2014-05-12', freq='D')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23)]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng - o
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), pd.offsets.Minute(120),
timedelta(minutes=120), np.timedelta64(120, 'm')]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
result = rng - delta
expected = pd.period_range('2014-01-01 08:00', '2014-01-05 08:00',
freq='H')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's')]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
result = rng - delta
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng -= delta
# int
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng - 1
expected = pd.period_range('2000-01-01 08:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_comp_nat(self):
left = pd.PeriodIndex([pd.Period('2011-01-01'), pd.NaT,
pd.Period('2011-01-03')])
right = pd.PeriodIndex([pd.NaT, pd.NaT, pd.Period('2011-01-03')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = PeriodIndex(np.repeat(idx.values, range(1, len(idx) + 1)),
freq='H')
exp_idx = PeriodIndex(['2011-01-01 18:00', '2011-01-01 17:00',
'2011-01-01 16:00', '2011-01-01 15:00',
'2011-01-01 14:00', '2011-01-01 13:00',
'2011-01-01 12:00', '2011-01-01 11:00',
'2011-01-01 10:00',
'2011-01-01 09:00'], freq='H')
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.period_range('2011-01-01 09:00', freq='H',
periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', pd.NaT], freq='H')
exp_idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
freq='H')
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 08:00',
pd.NaT], freq='H')
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.period_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx) # freq will not be reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.period_range('2011-01-01', '2011-01-31', freq='D',
name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)  # api: pandas.util.testing.assert_index_equal
# Copyright 2021 VicEdTools authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for importing data into OARS."""
from datetime import datetime
import re
import numpy as np
import pandas as pd
# Todo: Transition from only adding maths/english class codes to adding all
# relevant class codes as tags from the student enrolment data
def class_selector(class_string: str) -> pd.Series:
'''Identifies whether a given class name is an english or maths class.
Args:
class_string: a class code string
Returns:
A pandas Series containing two items, "Maths"/"English" and the class code.
'''
# maths pattern
pattern = "(?P<class_code>[789]MA[BEFG][0-9]|10MA[PQRSTU][X]?[0-9]|11FM[PQRSTU][0-9])"
match = re.match(pattern, class_string)
if match:
class_code = match.group("class_code")
return pd.Series(["Maths", class_code])
# english/eal pattern
pattern = "(?P<class_code>[789]EN[BEFG][0-9]|10EN[PQRSTU][0-9]|[789]EAL[BEFG][0-9]?|10EAL[PQRSTU][0-9]?)"
match = re.match(pattern, class_string)
if match:
class_code = match.group("class_code")
return pd.Series(["English", class_code])
# else
return pd.Series([None, None])
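# A quick sanity check of class_selector on hypothetical class codes (the codes
# below are illustrative only, not taken from a real timetable); guarded so it
# never runs when this module is imported.
if __name__ == "__main__":
    for _code in ["7MAB2", "10ENP1", "9EALB1", "9SCI3"]:
        print(_code, class_selector(_code).tolist())
    # expected: ['Maths', '7MAB2'], ['English', '10ENP1'],
    #           ['English', '9EALB1'], [None, None]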
def student_imports(student_details_file: str, student_enrolment_file: str,
current_students_file: str, new_students_file: str,
update_students_file: str) -> None:
'''Creates files to update the OARS student database.
Creates two separate files, one to update the details of existing students
in the database and one to add new students.
Args:
student_details_file: a student details export csv from Compass. Can be
downloaded using vicedtools.compass.export_student_details or directly
from https://[schoolID].compass.education/Services/FileDownload/CsvRequestHandler?type=37
student_enrolment_file: a student enrolment file exported from Compass.
Can be downloaded using vicedtools.compass.export_student_enrolments
current_students_file: a current students export from OARS
new_students_file: the filename to save the new students import for OARS
update_students_file: the filename to save the update students import for OARS
'''
existing_students_df = pd.read_excel(current_students_file)
student_details_df = pd.read_csv(student_details_file, dtype=np.str)  # api: pandas.read_csv
# -*- coding: utf-8 -*-
import torch
from pytorch_fid import fid_score
import pandas as pd
from glob import glob
import os, argparse
import numpy as np
# %%
device = torch.device('cuda' if (torch.cuda.is_available()) else 'cpu')
batch_size = 50
dim = 2048
#path1 = './Datasets/Zurich_patches/fold2/patch_tlevel1/A/test'
#path2 = './Datasets/Zurich_patches_fake/fold2/patch_tlevel1/cyc_A'
# %%
#fid_value = fid_score.calculate_fid_given_paths(
# [path1, path2],
# batch_size, device, dim)
# %%
def calculate_FIDs(dataset, fold=1):
# dataset='Zurich'
# fold=1
assert dataset in ['Balvan', 'Eliceiri', 'Zurich'], "dataset must be in ['Balvan', 'Eliceiri', 'Zurich']"
if dataset == 'Eliceiri':
dataroot_real = f'./Datasets/{dataset}_patches'
dataroot_fake = f'./Datasets/{dataset}_patches_fake'
dataroot_train = f'./Datasets/{dataset}_temp'
else:
dataroot_real = f'./Datasets/{dataset}_patches/fold{fold}'
dataroot_fake = f'./Datasets/{dataset}_patches_fake/fold{fold}'
dataroot_train = f'./Datasets/{dataset}_temp/fold{fold}'
gan_names = ['train2testA', 'train2testB', 'testA', 'testB', 'B2A',
'cyc_A', 'cyc_B', 'drit_A', 'drit_B', 'p2p_A', 'p2p_B', 'star_A', 'star_B', 'comir']
# gan_names = ['cyc_A', 'cyc_B', 'drit_A', 'drit_B', 'p2p_A', 'p2p_B', 'star_A', 'star_B', 'comir', 'B2A']
# csv information
header = [
'Dataset', 'Fold', 'Tlevel', 'GAN_name', 'Path_fake', 'Path_real',
'FID',
]
df = pd.DataFrame(columns=header)
row_dict = {'Dataset': dataset, 'Fold': fold}
for tlevel in [int(tl[-1]) for tl in glob(f'{dataroot_fake}/patch_tlevel*')]:
row_dict['Tlevel'] = tlevel
for gan_name in gan_names:
row_dict['GAN_name'] = gan_name
if gan_name in ['train2testA', 'train2testB']:
row_dict['Path_fake'] = f'{dataroot_train}/{gan_name[-1]}/train/'
row_dict['Path_real'] = f'{dataroot_real}/patch_tlevel{tlevel}/{gan_name[-1]}/test/'
elif gan_name in ['testA', 'testB']:
row_dict['Path_fake'] = f'{dataroot_real}/patch_tlevel{tlevel}/{gan_name[-1]}/test/'
row_dict['Path_real'] = f'{dataroot_real}/patch_tlevel{tlevel}/{gan_name[-1]}/test/'
elif gan_name == 'comir':
row_dict['Path_fake'] = f'{dataroot_fake}/patch_tlevel{tlevel}/{gan_name}_A/'
row_dict['Path_real'] = f'{dataroot_fake}/patch_tlevel{tlevel}/{gan_name}_B/'
elif gan_name == 'B2A':
row_dict['Path_fake'] = f'{dataroot_real}/patch_tlevel{tlevel}/A/test/'
row_dict['Path_real'] = f'{dataroot_real}/patch_tlevel{tlevel}/B/test/'
else:
row_dict['Path_fake'] = f'{dataroot_fake}/patch_tlevel{tlevel}/{gan_name}/'
row_dict['Path_real'] = f'{dataroot_real}/patch_tlevel{tlevel}/{gan_name[-1]}/test/'
row_dict['FID'] = fid_score.calculate_fid_given_paths(
[ row_dict['Path_fake'], row_dict['Path_real'] ],
batch_size, device, dim)
df = df.append(row_dict, ignore_index=True)
result_dir = dataroot_fake
if not os.path.exists(result_dir):
os.makedirs(result_dir)
df.to_csv(f'{result_dir}/FIDs.csv')
return
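# Usage sketch (assumes the ./Datasets folder layout referenced above already
# exists; nothing is created here):
#   calculate_FIDs('Zurich', fold=1)   # writes ./Datasets/Zurich_patches_fake/fold1/FIDs.csv
#   calculate_FIDs('Eliceiri')         # Eliceiri uses a single, unfolded layout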
# %%
def calculate_FIDs_3D(dataset, fold=1):
# dataset='RIRE'
# fold=1
assert dataset in ['RIRE'], "dataset must be in ['RIRE']"
dataroot_real = f'./Datasets/{dataset}_patches_forFID/real/fold{fold}'
dataroot_fake = f'./Datasets/{dataset}_patches_forFID/fake/fold{fold}'
gan_names = ['cyc_A', 'cyc_B', 'drit_A', 'drit_B', 'p2p_A', 'p2p_B', 'star_A', 'star_B', 'comir', 'B2A']
# csv information
header = [
'Dataset', 'Fold', 'Tlevel', 'GAN_name', 'Path_fake', 'Path_real',
'FID',
]
df = pd.DataFrame(columns=header)
row_dict = {'Dataset': dataset, 'Fold': fold}
row_dict['Tlevel'] = 1
for gan_name in gan_names:
row_dict['GAN_name'] = gan_name
if gan_name == 'comir':
row_dict['Path_fake'] = f'{dataroot_fake}/{gan_name}_A/'
row_dict['Path_real'] = f'{dataroot_fake}/{gan_name}_B/'
elif gan_name == 'B2A':
row_dict['Path_fake'] = f'{dataroot_real}/A/test/'
row_dict['Path_real'] = f'{dataroot_real}/B/test/'
else:
row_dict['Path_fake'] = f'{dataroot_fake}/{gan_name}/'
row_dict['Path_real'] = f'{dataroot_real}/{gan_name[-1]}/test/'
row_dict['FID'] = fid_score.calculate_fid_given_paths(
[ row_dict['Path_fake'], row_dict['Path_real'] ],
batch_size, device, dim)
df = df.append(row_dict, ignore_index=True)
result_dir = dataroot_fake
if not os.path.exists(result_dir):
os.makedirs(result_dir)
df.to_csv(f'{result_dir}/FIDs.csv')
return
# %%
def make_FID_success_table(dataset, preprocess='nopre'):
# dataset='Zurich'
# fold=1
assert dataset in ['Balvan', 'Eliceiri', 'Zurich'], "dataset must be in ['Balvan', 'Eliceiri', 'Zurich']"
if dataset == 'Eliceiri':
dataroot_real = f'./Datasets/{dataset}_patches'
path_FIDcsv = f'./Datasets/{dataset}_patches_fake'
w = 834
folds = ['']
else:
dataroot_real = f'./Datasets/{dataset}_patches/fold{{fold}}'
path_FIDcsv = f'./Datasets/{dataset}_patches_fake/fold*'
w = 300
folds = [1, 2, 3]
def success_rate(patches_dir, method, gan_name='', preprocess='nopre', mode='b2a'):
if gan_name in ['A2A', 'B2B', 'B2A']:
gan_name = ''
# read results
dfs = [pd.read_csv(csv_path) for csv_path
in glob(f'{patches_dir}/patch_tlevel*/results/{method+gan_name}_{mode}_{preprocess}.csv')]
whole_df = pd.concat(dfs)
n_success = whole_df['Error'][whole_df['Error'] <= w*0.02].count()
rate_success = n_success / len(whole_df)
# print(f'{method+gan_name}_{preprocess}', rate_success)
return rate_success
# gan_names = ['testA', 'testB', 'B2A',
# 'cyc_A', 'cyc_B', 'drit_A', 'drit_B', 'p2p_A', 'p2p_B', 'star_A', 'star_B', 'comir']
gan_names = ['cyc_A', 'cyc_B', 'drit_A', 'drit_B', 'p2p_A', 'p2p_B', 'star_A', 'star_B', 'comir', 'B2A']
# csv information
header = [
'Method', 'Dataset',
'FID_mean', 'FID_STD',
'Success_aAMD_mean', 'Success_aAMD_STD',
'Success_SIFT_mean', 'Success_SIFT_STD',
]
df = pd.DataFrame(columns=header)  # api: pandas.DataFrame
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('ggplot')
target = 'scale'
# IP
plot_mode = 'all_in_one'
obj = 'occ'
# Port
flow_dir = 'all'
port_dir = 'sys'
user_plot_pr = ['TCP']
user_plot_pr = ['UDP']
port_hist = pd.DataFrame({'A' : []})
user_port_hist = pd.DataFrame({'A' : []})
def acf(x, length=10):
return np.array([1]+[np.corrcoef(x[:-i], x[i:])[0,1] \
for i in range(1, length)])
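# Small self-check of acf (uses only numpy, imported above): lag 0 is 1.0 by
# construction and a smooth signal should show slowly decaying correlations.
# Guarded so it never runs on import; the series here is synthetic.
if __name__ == '__main__':
    _x = np.sin(np.arange(300) / 20.0) + 0.05 * np.random.randn(300)
    print(np.round(acf(_x, length=5), 3))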
def scale_check(data_idx, plot=False):
files = ['stanc', 'arcnn_f90', 'wpgan', 'ctgan', 'bsl1', 'bsl2', 'real']
names = ['stan_b', 'stan_a', 'wpgan', 'ctgan', 'bsl1', 'bsl2', 'real']
if files[data_idx] == 'real':
df = pd.read_csv("./postprocessed_data/%s/day2_90user.csv" % files[data_idx])
elif files[data_idx] == 'stanc' or files[data_idx] == 'stan':
df = pd.read_csv("./postprocessed_data/%s/%s_piece%d.csv" % (files[data_idx], files[data_idx], 0))
else:
df = pd.read_csv("./postprocessed_data/%s/%s_piece%d.csv" % (files[data_idx], files[data_idx], 0), index_col=None)
li = [df]
for piece_idx in range(1, 5):
df = pd.read_csv("./postprocessed_data/%s/%s_piece%d.csv" % (files[data_idx], files[data_idx], piece_idx), index_col=None, header=0)
li.append(df)
df = pd.concat(li, axis=0, ignore_index=True)
scale_list = []
for col in ['byt', 'pkt']:
scale_list.append(col)
scale_list.append(str(np.min(df[col])))
scale_list.append(str(np.log(np.max(df[col]))))
scale_list.append(';')
print(files[data_idx], ':', (' '.join(scale_list)))
def pr_distribution(data_idx, plot=False):
files = ['stan','stanc', 'arcnn_f90', 'wpgan', 'ctgan', 'bsl1', 'bsl2', 'real']
names = ['stan_fwd','stan_b', 'stan_a', 'wpgan', 'ctgan', 'bsl1', 'bsl2', 'real']
if files[data_idx] == 'real':
df = pd.read_csv("./postprocessed_data/%s/day2_90user.csv" % files[data_idx])
elif files[data_idx] == 'stanc' or files[data_idx] == 'stan':
df = pd.read_csv("./postprocessed_data/%s/%s_piece%d.csv" % (files[data_idx], files[data_idx], 0))
else:
df = pd.read_csv("./postprocessed_data/%s/%s_piece%d.csv" % (files[data_idx], files[data_idx], 0), index_col=None)
li = [df]
for piece_idx in range(1, 5):
df = pd.read_csv("./postprocessed_data/%s/%s_piece%d.csv" % (files[data_idx], files[data_idx], piece_idx), index_col=None, header=0)
li.append(df)
df = pd.concat(li, axis=0, ignore_index=True)
# pr marginal distribution
pr_series = df['pr'].value_counts()
print(names[data_idx], pr_series)
ct = [0, 0, 0]
for i in pr_series.keys():
if i == 'TCP':
ct[0] += pr_series[i]
elif i == 'UDP':
ct[1] += pr_series[i]
else:
ct[2] += pr_series[i]
ct2 = [x/sum(ct) for x in ct]
print(ct2)
with open('results/pr/pr_marginal.csv', 'a') as out:
out.write(','.join([names[data_idx], str(ct2[0]), str(ct2[1]), str(ct2[2]), '\n']))
# prob of spec ports
# http 80/tcp
# https 443/tcp, 443/udp
# ssh 22/tcp
# DNS Service 53
# FTP 21/tcp
# ob_ports = [80, 443, 22, 53, 21]
# for ob_q in ob_ports:
# df_ = df[df['dp'] == ob_q]
# print(ob_q, len(df_.index)/len(df.index), len(df_.index), len(df.index))
# input()
def check_distribution(df, name, user=None):
# count = df_all.value_counts()
# df.hist = df.hist()
df = df.astype(int)
# print(df.value_counts(normalize=True))
global port_hist
global user_port_hist
if port_dir == 'sys':
df.hist(bins=1024) # s is an instance of Series
# plt.plot(df.value_counts().index, df.value_counts().values)
plt.savefig('./results/ports/%s/%s.png' % (port_dir, name))
plt.clf()
port_hist[name+'_port'] = df.value_counts(normalize=True)[:10].index
port_hist[name+'_occ'] = df.value_counts(normalize=True)[:10].values
else:
l_p = []
l_o = []
bar_size = 6000
for i in range(1024, 65536, bar_size):
l_p.append(i)
l_o.append(len(df[(i<=df) & (df<i+bar_size)].index))
# print(df[(i<=df) & (df<i+bar_size)])
# print(i, i+bar_size)
# input()
# print(l_o, name)
l_o = [x/sum(l_o) for x in l_o]
if len(user_port_hist.columns) == 1:
user_port_hist[name+'_port'] = l_p
user_port_hist[name+'_occ'] = l_o
plt.plot(l_p, l_o)
plt.xlabel("user port")
plt.ylabel("probability")
plt.title("user port distribution")
# plt.xticks(x_pos, x)
# plt.savefig('./results/ports/%s/%s.png' % (port_dir, name))
# plt.clf()
print('plotted %s' % name)
def port_distribution(data_idx, plot=False):
files = ['stan','stanc', 'arcnn_f90', 'wpgan', 'ctgan', 'bsl1', 'bsl2', 'real']
names = ['stan_fwd','stan_b', 'stan_a', 'wpgan', 'ctgan', 'bsl1', 'bsl2', 'real']
if files[data_idx] == 'real':
df = pd.read_csv("./postprocessed_data/%s/day2_90user.csv" % files[data_idx])
elif files[data_idx] == 'stanc' or files[data_idx] == 'stan':
df = pd.read_csv("./postprocessed_data/%s/%s_piece%d.csv" % (files[data_idx], files[data_idx], 0))
else:
df = pd.read_csv("./postprocessed_data/%s/%s_piece%d.csv" % (files[data_idx], files[data_idx], 0), index_col=None)
li = [df]
for piece_idx in range(1, 5):
df = pd.read_csv("./postprocessed_data/%s/%s_piece%d.csv" % (files[data_idx], files[data_idx], piece_idx), index_col=None, header=0)
li.append(df)
df = pd.concat(li, axis=0, ignore_index=True)
for pr in ['TCP', 'UDP']:
df_pr = df[df['pr'] == pr]
if flow_dir == 'outgoing':
flows = df_pr[df_pr['sa'].str.startswith('42.219')]
elif flow_dir == 'incoming':
flows = df_pr[df_pr['da'].str.startswith('42.219')]
else:
flows = df_pr.dropna()
# outgoing_port = pd.concat([outgoing_flows['sp'], outgoing_flows['dp']], axis= 0)
# check_distribution(outgoing_port, files[data_idx]+'_outgoing')
# incoming_port = pd.concat([flows['sp'], flows['dp']], axis= 0)
if port_dir == 'sys':
incoming_port = flows[flows['dp']<1024]['dp']
check_distribution(incoming_port, names[data_idx]+'_'+ pr +'_'+flow_dir)
else:
user_port = flows[flows['dp']>=1024]['dp']
check_distribution(user_port, names[data_idx]+'_'+ pr +'_'+flow_dir, user=True)
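# Example driver (a sketch; the indices follow the `files` list inside
# port_distribution, and the output filename is hypothetical):
#   for _idx in range(8):
#       port_distribution(_idx)
#   port_hist.to_csv('./results/ports/sys/top10_ports.csv')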
def attribute_autocorr(data_idx, plot=False):
files = ['stanc', 'arcnn_f90', 'wpgan', 'ctgan', 'bsl1', 'bsl2', 'real']
# files = ['stanc', 'arcnn_f90', 'wpgan', 'ctgan', 'real']
if files[data_idx] == 'real':
df = pd.read_csv("./postprocessed_data/%s/day2_90user.csv" % files[data_idx])
elif files[data_idx] == 'stanc':
df = pd.read_csv("./postprocessed_data/%s/%s_piece%d.csv" % (files[data_idx], files[data_idx], 0))
else:
df = pd.read_csv("./postprocessed_data/%s/%s_piece%d.csv" % (files[data_idx], files[data_idx], 0), index_col=None)
li = [df]
for piece_idx in range(1, 5):
df = pd.read_csv("./postprocessed_data/%s/%s_piece%d.csv" % (files[data_idx], files[data_idx], piece_idx), index_col=None, header=0)
li.append(df)
df = pd.concat(li, axis=0, ignore_index=True)
df1 = df[['byt', 'pkt']]
# print(df1)
# input()
auto = acf(df1['byt'])
print(files[data_idx], auto)
if plot:
# df_plot = pd.read_csv('results/ip_power_law/volumn_%s.csv' % files[data_idx], header=None)
# print(df_plot)
# input()
plt.plot(auto)
# plt.plot(df_plot[1])
if plot_mode != 'all_in_one':
plt.savefig('results/ip_power_law/%s.png' % files[data_idx])
plt.clf()
def ip_volumne(data_idx, plot=False):
files = ['stanc', 'arcnn_f90', 'wpgan', 'ctgan', 'bsl1', 'bsl2', 'real']
# files = ['stanc', 'arcnn_f90', 'wpgan', 'ctgan', 'real']
if files[data_idx] == 'real':
df = pd.read_csv("./postprocessed_data/%s/day2_90user.csv" % files[data_idx])
elif files[data_idx] == 'stanc':
df = pd.read_csv("./postprocessed_data/%s/%s_piece%d.csv" % (files[data_idx], files[data_idx], 0))
else:
df = pd.read_csv("./postprocessed_data/%s/%s_piece%d.csv" % (files[data_idx], files[data_idx], 0), index_col=None)
li = [df]
for piece_idx in range(1, 5):
df = pd.read_csv("./postprocessed_data/%s/%s_piece%d.csv" % (files[data_idx], files[data_idx], piece_idx), index_col=None, header=0)
li.append(df)
df = pd.concat(li, axis=0, ignore_index=True)
df1 = df[['sa', 'da', 'byt']]
df2_s = df1[['sa', 'byt']]
df2_d = df1[['da', 'byt']]
df2_s.columns = ['ip', 'byt']
df2_d.columns = ['ip', 'byt']
df_all = pd.concat([df2_s, df2_d], axis= 0)
df_nolocal = df_all[~df_all['ip'].str.startswith('42.219')]
df_nolocal = df_nolocal.sample(1000000)
group_cols = df_nolocal.columns.tolist()
group_cols.remove('byt')
df_sum = df_nolocal.groupby(group_cols,as_index=False)['byt'].sum()
# print(df_sum)
# print(df_sum[df_sum['ip']=='172.16.58.3'])
# input()
count = df_sum['byt']#.value_counts()
s = count.sort_values(ascending=False)
# final_df = count.sort_values(by=['byt'], ascending=False)
# print(final_df)
# input()
print(files[data_idx], len(df.index), len(df_nolocal.index), len(df_sum.index), len(count.index))
s.to_csv('results/ip_power_law/volumn_%s.csv' % files[data_idx])
if plot:
df_plot = pd.read_csv('results/ip_power_law/volumn_%s.csv' % files[data_idx], header=None)
# print(df_plot)
# input()
plt.plot(np.log(df_plot[1]))
# plt.plot(df_plot[1])
if plot_mode != 'all_in_one':
plt.savefig('results/ip_power_law/%s.png' % files[data_idx])
plt.clf()
def ip_power_law(data_idx, plot=False):
files = ['stanc', 'arcnn_f90', 'wpgan', 'ctgan', 'bsl1', 'bsl2', 'real']
files = ['stanc', 'arcnn_f90', 'wpgan', 'ctgan', 'real']
if files[data_idx] == 'real':
df = pd.read_csv("./postprocessed_data/%s/day2_90user.csv" % files[data_idx])  # api: pandas.read_csv
#!/usr/bin/env python
"""
DataExplore Application based on pandastable.
Created January 2014
Copyright (C) <NAME>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 3
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
from __future__ import absolute_import, print_function
import sys, datetime
try:
from tkinter import *
from tkinter.ttk import *
except:
from Tkinter import *
from ttk import *
if (sys.version_info > (3, 0)):
from tkinter import filedialog, messagebox, simpledialog
else:
import tkFileDialog as filedialog
import tkSimpleDialog as simpledialog
import tkMessageBox as messagebox
from collections import OrderedDict
import matplotlib
matplotlib.use('TkAgg', warn=False)
import pandas as pd
import re, os, platform, time
from .core import Table
from .data import TableModel
from .prefs import Preferences
from . import images, util, dialogs
from .dialogs import MultipleValDialog
from . import plugin
from .preferences import Prefs
class DataExplore(Frame):
"""DataExplore application using pandastable widget.
Args:
parent: parent tkinter Frame, default None
data: data, a pandas DataFrame
projfile: path to a project file, opened on launch
msgpack: path to a dataframe stored as msgpack, default None
"""
def __init__(self, parent=None, data=None, projfile=None, msgpack=None):
"""Initialize the application. """
self.parent=parent
if not self.parent:
Frame.__init__(self)
self.main=self.master
else:
self.main=Toplevel()
self.master=self.main
if getattr(sys, 'frozen', False):
#the application is frozen
self.modulepath = os.path.dirname(sys.executable)
else:
self.modulepath = os.path.dirname(__file__)
icon = os.path.join(self.modulepath,'dataexplore.gif')
img = PhotoImage(file=icon)
self.main.tk.call('wm', 'iconphoto', self.main._w, img)
# Get platform into a variable
self.currplatform = platform.system()
self.setConfigDir()
if not hasattr(self,'defaultsavedir'):
self.defaultsavedir = os.getcwd()
self.main.title('DataExplore')
self.createMenuBar()
self.discoverPlugins()
self.setupGUI()
self.setStyles()
self.clipboarddf = None
self.projopen = False
opts = {'layout':{'type':'checkbutton','default':'horizontal'}}
#self.prefs = Prefs('.dataexplore', opts=opts)
if data != None:
self.data = data
self.newProject(data)
elif projfile != None:
self.loadProject(projfile)
elif msgpack != None:
self.load_msgpack(msgpack)
else:
self.newProject()
self.main.protocol('WM_DELETE_WINDOW',self.quit)
self.main.lift()
return
def setStyles(self):
"""Set theme and widget styles"""
style = self.style = Style(self)
available_themes = self.style.theme_names()
plf = util.checkOS()
if plf == 'linux':
style.theme_use('default')
elif plf == 'darwin':
style.theme_use('clam')
self.bg = bg = self.style.lookup('TLabel.label', 'background')
style.configure('Horizontal.TScale', background=bg)
#set common background style for all widgets because of color issues
#if plf in ['linux','darwin']:
# self.option_add("*background", bg)
dialogs.applyStyle(self.menu)
return
def setConfigDir(self):
"""Set up config folder"""
homepath = os.path.join(os.path.expanduser('~'))
path = '.dataexplore'
self.configpath = os.path.join(homepath, path)
self.pluginpath = os.path.join(self.configpath, 'plugins')
if not os.path.exists(self.configpath):
os.mkdir(self.configpath)
os.makedirs(self.pluginpath)
return
def setupGUI(self):
"""Add all GUI elements"""
self.m = PanedWindow(self.main, orient=HORIZONTAL)
self.m.pack(fill=BOTH,expand=1)
self.nb = Notebook(self.main)
self.m.add(self.nb)
self.setGeometry()
return
def createMenuBar(self):
"""Create the menu bar for the application. """
self.menu=Menu(self.main)
self.file_menu={'01New Project':{'cmd': self.newProject},
'02Open Project':{'cmd': lambda: self.loadProject(asksave=True)},
'03Close':{'cmd':self.closeProject},
'04Save':{'cmd':self.saveProject},
'05Save As':{'cmd':self.saveasProject},
'06sep':'',
'07Import CSV':{'cmd':self.importCSV},
'08Import from URL':{'cmd':self.importURL},
'08Import Excel':{'cmd':self.importExcel},
'09Export CSV':{'cmd':self.exportCSV},
'10sep':'',
'11Quit':{'cmd':self.quit}}
if self.parent:
self.file_menu['08Return to Database']={'cmd':self.return_data}
self.file_menu=self.createPulldown(self.menu,self.file_menu)
self.menu.add_cascade(label='File',menu=self.file_menu['var'])
self.edit_menu={'01Undo Last Change':{'cmd': self.undo},
'02Copy Table':{'cmd': self.copyTable},
'03Table Preferences':{'cmd': self.currentTablePrefs},
}
self.edit_menu = self.createPulldown(self.menu,self.edit_menu)
self.menu.add_cascade(label='Edit',menu=self.edit_menu['var'])
self.sheet_menu={'01Add Sheet':{'cmd': lambda: self.addSheet(select=True)},
'02Remove Sheet':{'cmd': lambda: self.deleteSheet(ask=True)},
'03Copy Sheet':{'cmd':self.copySheet},
'04Rename Sheet':{'cmd':self.renameSheet},
'05Sheet Description':{'cmd':self.editSheetDescription}
}
self.sheet_menu=self.createPulldown(self.menu,self.sheet_menu)
self.menu.add_cascade(label='Sheet',menu=self.sheet_menu['var'])
self.view_menu={'01Zoom In':{'cmd': lambda: self._call('zoomIn')},
'02Zoom Out':{'cmd': lambda: self._call('zoomOut')},
'03sep':'',
'04Dark Theme':{'cmd': lambda: self._call('setTheme', name='dark')},
'05Bold Theme':{'cmd': lambda: self._call('setTheme', name='bold')},
'06Default Theme':{'cmd': lambda: self._call('setTheme', name='default')},
}
self.view_menu = self.createPulldown(self.menu,self.view_menu)
self.menu.add_cascade(label='View',menu=self.view_menu['var'])
self.table_menu={'01Describe Table':{'cmd':self.describe},
'02Convert Column Names':{'cmd':lambda: self._call('convertColumnNames')},
'03Convert Numeric':{'cmd': lambda: self._call('convertNumeric')},
'04Clean Data': {'cmd': lambda: self._call('cleanData')},
'05Find Duplicates': {'cmd': lambda: self._call('findDuplicates')},
'06Correlation Matrix':{'cmd': lambda: self._call('corrMatrix')},
'07Concatenate Tables':{'cmd':self.concat},
'08Table to Text':{'cmd': lambda: self._call('showasText')},
'09Table Info':{'cmd': lambda: self._call('showInfo')},
'10sep':'',
'11Transform Values':{'cmd': lambda: self._call('transform')},
'12Group-Aggregate':{'cmd': lambda: self._call('aggregate')},
'13Merge/Concat Tables': {'cmd': lambda: self._call('doCombine')},
'14Pivot Table':{'cmd': lambda: self._call('pivot')},
'15Melt Table':{'cmd': lambda: self._call('melt')},
'16Time Series Resampling':{'cmd': lambda: self._call('resample')}
}
self.table_menu=self.createPulldown(self.menu,self.table_menu)
self.menu.add_cascade(label='Tools',menu=self.table_menu['var'])
self.dataset_menu={'01Sample Data':{'cmd':self.sampleData},
'03Iris Data':{'cmd': lambda: self.getData('iris.csv')},
'03Tips Data':{'cmd': lambda: self.getData('tips.csv')},
'04Stacked Data':{'cmd':self.getStackedData},
'05Pima Diabetes':
{'cmd': lambda: self.getData('pima.csv')},
'06Titanic':
{'cmd': lambda: self.getData('titanic3.csv')},
'07miRNA expression':
{'cmd': lambda: self.getData('miRNA.csv')},
'08CO2 time series':
{'cmd': lambda: self.getData('co2-ppm-mauna-loa.csv')}
}
self.dataset_menu=self.createPulldown(self.menu,self.dataset_menu)
self.menu.add_cascade(label='Datasets',menu=self.dataset_menu['var'])
self.plots_menu={'01Store plot':{'cmd':self.addPlot},
'02Clear plots':{'cmd':self.updatePlotsMenu},
'03PDF report':{'cmd':self.pdfReport},
'04sep':''}
self.plots_menu=self.createPulldown(self.menu,self.plots_menu)
self.menu.add_cascade(label='Plots',menu=self.plots_menu['var'])
self.plugin_menu={'01Update Plugins':{'cmd':self.discoverPlugins},
'02Install Plugin':{'cmd':self.installPlugin},
'03sep':''}
self.plugin_menu=self.createPulldown(self.menu,self.plugin_menu)
self.menu.add_cascade(label='Plugins',menu=self.plugin_menu['var'])
self.help_menu={'01Online Help':{'cmd':self.online_documentation},
'02About':{'cmd':self.about}}
self.help_menu=self.createPulldown(self.menu,self.help_menu)
self.menu.add_cascade(label='Help',menu=self.help_menu['var'])
self.main.config(menu=self.menu)
return
def bring_to_foreground(self, set_focus=False):
self.main.deiconify()
self.main.attributes('-topmost', True)
self.main.after_idle(self.main.attributes, '-topmost', False)
self.main.lift()
if set_focus:
#Looks like at least on Windows the following is required for the window
#to also get focus (deiconify, ..., iconify, deiconify)
import platform
if platform.system() != "Linux":
# http://stackoverflow.com/a/13867710/261181
self.main.iconify()
self.main.deiconify()
return
def getBestGeometry(self):
"""Calculate optimal geometry from screen size"""
ws = self.main.winfo_screenwidth()
hs = self.main.winfo_screenheight()
self.w = w = ws/1.4; h = hs*0.7
x = (ws/2)-(w/2); y = (hs/2)-(h/2)
g = '%dx%d+%d+%d' % (w,h,x,y)
return g
def setGeometry(self):
self.winsize = self.getBestGeometry()
self.main.geometry(self.winsize)
return
def createPulldown(self,menu,dict):
"""Create pulldown menu, returns a dict"""
var = Menu(menu,tearoff=0)
dialogs.applyStyle(var)
items = list(dict.keys())
items.sort()
for item in items:
if item[-3:] == 'sep':
var.add_separator()
else:
command = None
if 'cmd' in dict[item]:
command = dict[item]['cmd']
if 'sc' in dict[item]:
var.add_command(label='%-25s %9s' %(item[2:],dict[item]['sc']),
command=command)
else:
var.add_command(label='%-25s' %(item[2:]), command=command)
dict['var'] = var
return dict
def progressDialog(self):
t = Toplevel(self)
pb = Progressbar(t, mode="indeterminate")
pb.pack(side="bottom", fill=X)
t.title('Progress')
t.transient(self)
t.grab_set()
t.resizable(width=False, height=False)
return pb
def currentTablePrefs(self):
table = self.getCurrentTable()
table.showPrefs()
return
def preferencesDialog(self):
"""Prefs dialog from config parser info - not yet implemented"""
def save():
d = dialogs.getDictfromTkVars(opts, tkvars, widgets)
p.writeConfig(d)
from . import plotting
defaultfont = 'monospace'
p = Prefs('.dataexplore')
opts = {'layout':{'type':'checkbutton','default':False,'label':'vertical plot tools'},
'fontsize':{'type':'scale','default':12,'range':(5,40),'interval':1,'label':'font size'},
'colormap':{'type':'combobox','default':'Spectral','items':plotting.colormaps},
}
sections = {'main':['layout'],'plot':['fontsize','colormap']}
p.createConfig(opts)
t=Toplevel(self)
dialog, tkvars, widgets = dialogs.dialogFromOptions(t, opts, sections)
dialog.pack(side=TOP,fill=BOTH)
bf=Frame(t)
bf.pack()
Button(bf, text='Save', command=save).pack(side=LEFT)
Button(bf, text='Close', command=t.destroy).pack(side=LEFT)
t.title('Preferences')
t.transient(self)
t.grab_set()
t.resizable(width=False, height=False)
d = dialogs.getDictfromTkVars(opts, tkvars, widgets)
print (d)
return
def loadMeta(self, table, meta):
"""Load meta data for a sheet, this includes plot options and
table selections"""
tablesettings = meta['table']
#rowheadersettings = meta['rowheader']
#print (meta['rowheader'])
if 'childtable' in meta:
childtable = meta['childtable']
childsettings = meta['childselected']
else:
childtable = None
#load plot options
opts = {'mplopts': table.pf.mplopts,
'mplopts3d': table.pf.mplopts3d,
'labelopts': table.pf.labelopts}
#'layoutopts': table.pf.layoutopts}
for m in opts:
if m in meta:
util.setAttributes(opts[m], meta[m])
opts[m].updateFromOptions()
#load table settings
util.setAttributes(table, tablesettings)
#util.setAttributes(table.rowheader, rowheadersettings)
if childtable is not None:
table.createChildTable(df=childtable)
util.setAttributes(table.child, childsettings)
#redraw col selections
if type(table.multiplecollist) is tuple:
table.multiplecollist = list(table.multiplecollist)
table.drawMultipleCols()
return
def saveMeta(self, table):
"""Save meta data such as current plot options"""
meta = {}
#save plot options
meta['mplopts'] = util.getAttributes(table.pf.mplopts)
meta['mplopts3d'] = util.getAttributes(table.pf.mplopts3d)
meta['labelopts'] = util.getAttributes(table.pf.labelopts)
meta['layoutopts'] = util.getAttributes(table.pf.layoutopts)
#save table selections
meta['table'] = util.getAttributes(table)
#meta['rowheader'] = util.getAttributes(table.rowheader)
#save row colors since its a dataframe and isn't picked up by getattributes currently
meta['table']['rowcolors'] = table.rowcolors
#save child table if present
if table.child != None:
meta['childtable'] = table.child.model.df
meta['childselected'] = util.getAttributes(table.child)
return meta
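# For reference, the meta dict built above (and consumed by loadMeta) has
# roughly this shape (a sketch, not an exhaustive spec):
#   {'mplopts': {...}, 'mplopts3d': {...}, 'labelopts': {...},
#    'layoutopts': {...},
#    'table': {..., 'rowcolors': DataFrame},
#    'childtable': DataFrame,        # only if a child table exists
#    'childselected': {...}}         # only if a child table exists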
def newProject(self, data=None, df=None):
"""Create a new project from data or empty"""
w = self.closeProject()
if w == None:
return
self.sheets = OrderedDict()
self.sheetframes = {} #store references to enclosing widgets
self.openplugins = {} #refs to running plugins
self.updatePlotsMenu()
for n in self.nb.tabs():
self.nb.forget(n)
if data != None:
for s in sorted(data.keys()):
if s == 'meta':
continue
df = data[s]['table']
if 'meta' in data[s]:
meta = data[s]['meta']
else:
meta=None
#try:
self.addSheet(s, df, meta)
'''except Exception as e:
print ('error reading in options?')
print (e)'''
else:
self.addSheet('sheet1')
self.filename = None
self.projopen = True
self.main.title('DataExplore')
return
def loadProject(self, filename=None, asksave=False):
"""Open project file"""
w=True
if asksave == True:
w = self.closeProject()
if w == None:
return
if filename == None:
filename = filedialog.askopenfilename(defaultextension='.dexpl"',
initialdir=os.getcwd(),
filetypes=[("project","*.dexpl"),
("All files","*.*")],
parent=self.main)
if not filename:
return
ext = os.path.splitext(filename)[1]
if ext != '.dexpl':
print ('does not appear to be a project file')
return
if os.path.isfile(filename):
#pb = self.progressDialog()
#t = threading.Thread()
#t.__init__(target=pd.read_msgpack, args=(filename))
#t.start()
data = pd.read_msgpack(filename)
#create backup file before we change anything
backupfile = filename+'.bak'
pd.to_msgpack(backupfile, data, encoding='utf-8')
else:
print ('no such file')
self.quit()
return
self.newProject(data)
self.filename = filename
self.main.title('%s - DataExplore' %filename)
self.projopen = True
return
def saveProject(self, filename=None):
"""Save project"""
if filename != None:
self.filename = filename
if not hasattr(self, 'filename') or self.filename == None:
self.saveasProject()
else:
self.doSaveProject(self.filename)
return
def saveasProject(self):
"""Save as a new filename"""
filename = filedialog.asksaveasfilename(parent=self.main,
defaultextension='.dexpl',
initialdir=self.defaultsavedir,
filetypes=[("project","*.dexpl")])
if not filename:
return
self.filename=filename
self.doSaveProject(self.filename)
return
def doSaveProject(self, filename):
"""Save sheets as dict in msgpack"""
self._checkTables()
data={}
for i in self.sheets:
table = self.sheets[i]
data[i] = {}
data[i]['table'] = table.model.df
data[i]['meta'] = self.saveMeta(table)
#try:
pd.to_msgpack(filename, data, encoding='utf-8')
#except:
# print('SAVE FAILED!!!')
return
def _checkTables(self):
"""Check tables before saving that so we are not saving
filtered copies"""
for s in self.sheets:
t=self.sheets[s]
if t.filtered==True:
t.showAll()
return
def closeProject(self):
"""Close"""
if self.projopen == False:
w = False
else:
w = messagebox.askyesnocancel("Close Project",
"Save this project?",
parent=self.master)
if w==None:
return
elif w==True:
self.saveProject()
else:
pass
for n in self.nb.tabs():
self.nb.forget(n)
self.filename = None
self.projopen = False
self.main.title('DataExplore')
return w
def importCSV(self):
"""Import csv to a new sheet"""
self.addSheet(select=True)
table = self.getCurrentTable()
table.importCSV(dialog=True)
return
def importURL(self):
"""Import CSV from URL"""
url = simpledialog.askstring("Import url", "Input CSV URL",
parent=self.master)
if url is not None:
name = os.path.basename(url)
df = pd.read_csv(url)
self.addSheet(sheetname=name, df=df, select=True)
return
def exportCSV(self):
"""Import csv to a new sheet"""
table = self.getCurrentTable()
table.doExport()
return
def importExcel(self, filename=None):
if filename is None:
filename = filedialog.askopenfilename(parent=self.master,
defaultextension='.xls',
initialdir=os.getcwd(),
filetypes=[("xls","*.xls"),
("xlsx","*.xlsx"),
("All files","*.*")])
data = pd.read_excel(filename,sheetname=None)  # api: pandas.read_excel
import glob
import pandas as pd
import numpy as np
import config
from lcoc import afdc
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
##### Functions #####
###################
### Residential ###
###################
def res_rates_to_utils(scenario = 'baseline',
urdb_rates_file = 'outputs/cost-of-electricity/urdb-res-rates/res_rates.csv',
eia_cw_file = config.EIAID_TO_UTILITY_CW_PATH,
eia_utils_file = config.EIA_RES_PATH,
outpath = 'outputs/cost-of-electricity/res-utilities/'):
"""
Takes residential URDB rates from urdb_rates_file and combines them with
eia_utils_file to produce utility-level annual average cost of electricity
estimates under the following scenarios:
'baseline' (replace eia cost of electricity w/ off-peak TOU rate, if applicable),
'no-tou' (eia cost of electricity only), 'tou-only' (only TOU rates
from URDB are considered).
"""
# Load/Preprocess EIA datasets
eiaid_cw = pd.read_csv(eia_cw_file)
eiaid_cw = eiaid_cw[['eiaid', 'entity', 'state']]
eiaid_utils = pd.read_csv(eia_utils_file)
eiaid_utils.rename(columns={'avg_price_cents_per_kwh': 'eia_cost_per_kwh'}, inplace=True)
eiaid_utils['eia_cost_per_kwh'] = eiaid_utils['eia_cost_per_kwh'] / 100
eiaid_utils = eiaid_utils[eiaid_utils.eiaid!=99999]
wm = lambda x: np.average(x, weights=eiaid_utils.loc[x.index, "customers"])
f = {'customers': 'sum', 'eia_cost_per_kwh': wm}
eiaid_utils = eiaid_utils.groupby(['entity', 'state']).agg(f).reset_index()
#eiaid_utils.columns = eiaid_utils.columns.droplevel(1)
eiaid_res_df = eiaid_cw.merge(eiaid_utils, how='right', on=['entity', 'state'])
eiaid_res_df = eiaid_res_df.drop_duplicates()
# Load URDB Rates
urdb_rates = pd.read_csv(urdb_rates_file, low_memory=False)
# Find Off-Peak TOU Price for URDB Rates
all_tou_rates_df = urdb_rates[urdb_rates.is_tou_rate==1]
eiaid_tou_rates_df = all_tou_rates_df.groupby('eiaid')['electricity_cost_per_kwh'].min().reset_index()
eiaid_tou_rates_df.rename(columns={'electricity_cost_per_kwh': 'offpeak_tou_cost_per_kwh'}, inplace=True)
# Baseline - {MIN((off-peak TOU, EIA average))}
if scenario == "baseline": #default
eiaid_res_df = eiaid_res_df.merge(eiaid_tou_rates_df, how='left', on='eiaid')
tou_rates_used, costs_incl_tou = 0, []
for i in range(len(eiaid_res_df)):
eia_cost = eiaid_res_df.iloc[i].eia_cost_per_kwh
offpeak_tou_cost = eiaid_res_df.iloc[i].offpeak_tou_cost_per_kwh
low_cost = min([eia_cost, offpeak_tou_cost])
if low_cost == offpeak_tou_cost:
tou_rates_used+=1
costs_incl_tou.append(low_cost)
eiaid_res_df['cost_per_kwh'] = costs_incl_tou
print("Complete, {0} utitilies represented ({1} TOU rates used).".format(len(eiaid_res_df),
tou_rates_used))
eiaid_res_df.to_csv(outpath+'res_utils.csv', index=False)
# No-TOU - "Business as Usual", EIA averages used (upper bound)
elif scenario == "no-tou":
eiaid_res_df['cost_per_kwh'] = eiaid_res_df['eia_cost_per_kwh']
print("Complete, {} utilities represented (no TOU rates used).".format(len(eiaid_res_df)))
eiaid_res_df.to_csv(outpath+"upper_bnd_res_utils.csv", index=False)
# TOU-Only - URDB TOU rates only (lower bound)
elif scenario == "tou-only":
eiaid_tou_rates_df['cost_per_kwh'] = eiaid_tou_rates_df['offpeak_tou_cost_per_kwh']
eiaid_tou_rates_df = eiaid_tou_rates_df.merge(eiaid_res_df[['eiaid', 'state', 'customers']], how='inner', on='eiaid')
print("Complete, {} utitilies represented (only TOU rates used).".format(len(eiaid_tou_rates_df)))
eiaid_tou_rates_df.to_csv(outpath+"lower_bnd_res_utils.csv", index=False)
else:
raise ValueError('scenario not in ["baseline", "no_tou", "tou-only"]')
return eiaid_res_df
def res_utils_to_state(utils_file = 'outputs/cost-of-electricity/res-utilities/res_utils.csv',
outfile = 'outputs/cost-of-electricity/res-states/res_states_baseline.csv'):
"""
Takes utility-level cost of electricity and calculates customer-weighted state-level
cost of electricity for the baseline scenario (TOU & No-TOU).
"""
res_util_df = pd.read_csv(utils_file, low_memory=False)
states, cost_per_kwh, customers = [], [], []
for state in set(res_util_df['state']):
temp_df = res_util_df[res_util_df['state'] == state]
tot_customers = temp_df['customers'].sum()
wgt_cost = ((temp_df['cost_per_kwh'] * temp_df['customers']) / tot_customers).sum()
states.append(state)
customers.append(tot_customers)
cost_per_kwh.append(wgt_cost)
state_df = pd.DataFrame({'state': states,
'customers': customers,
'cost_per_kwh': cost_per_kwh})
#Add national estimate
nat_customers = state_df['customers'].sum()
nat_cost_per_kwh = ((state_df['cost_per_kwh'] * state_df['customers']) / nat_customers).sum()
nat_df = pd.DataFrame({'state': ['US'],
'customers': [nat_customers],
'cost_per_kwh': [nat_cost_per_kwh]})
state_df = pd.concat([state_df, nat_df]).reset_index(drop=True)
state_df.to_csv(outfile, index=False)
print('Complete, national cost of electricity is ${}/kWh.'.format(round(nat_cost_per_kwh,2)))
def calculate_state_residential_lcoc(coe_file = 'outputs/cost-of-electricity/res-states/res_states_baseline.csv',
fixed_costs_path = 'data/fixed-costs/residential/',
annual_maint_frac = 0.01, #Annual cost of maintenance (fraction of equip costs)
veh_lifespan = 15,
veh_kwh_per_100miles = 29.82, #source: EIA
aavmt = 10781, #source: 2017 NHTS
fraction_residential_charging = 0.81, #source: EPRI study
fraction_home_l1_charging = 0.16, #source: EPRI study
dr = 0.035, #source: Mercatus
outfile = 'outputs/cost-of-charging/residential/res_states_baseline.csv'):
"""
Function calculates the state-level residential levelized cost of charging, taking
into account the average cost of electricity, fixed costs, and equipment
maintenance.
"""
# Load data
df = pd.read_csv(coe_file)
filenames = ['res_level1.txt', 'res_level2.txt']
fixed_cost_files = [fixed_costs_path + filename for filename in filenames]
fixed_costs = {}
for file in fixed_cost_files:
if 'level1' in file:
plug_typ = 'L1'
elif 'level2' in file:
plug_typ = 'L2'
plug_typ_dict = {}
with open (file) as f:
for line in f:
key, val = line.split(':')
plug_typ_dict[key] = float(val)
fixed_costs[plug_typ] = plug_typ_dict
# Calculate lifetime EVSE cost of maintenance (assumed to be 1% of equipment cost annually)
for plug_typ in fixed_costs.keys():
discounted_lifetime_maint_cost = 0
for i in range(1, veh_lifespan+1):
ann_maint_cost = annual_maint_frac * fixed_costs[plug_typ]['equipment']
discounted_ann_maint_cost = ann_maint_cost / (1+dr)**i
discounted_lifetime_maint_cost += discounted_ann_maint_cost
fixed_costs[plug_typ]['lifetime_evse_maint'] = discounted_lifetime_maint_cost
# Calculate lifetime energy from residential charging
lifetime_miles = veh_lifespan * aavmt
veh_kwh_per_mile = veh_kwh_per_100miles / 100
lifetime_energy_kwh = lifetime_miles * veh_kwh_per_mile
lifetime_residential_energy_kwh = fraction_residential_charging * lifetime_energy_kwh
# Calculate lvl fixed costs for residential L1, L2 charging
try:
lvl_fixed_costs_l1 = (fixed_costs['L1']['equipment'] + fixed_costs['L1']['installation'] \
+ fixed_costs['L1']['lifetime_evse_maint']) / lifetime_residential_energy_kwh
except:
lvl_fixed_costs_l1 = 0
lvl_fixed_costs_l2 = (fixed_costs['L2']['equipment'] + fixed_costs['L2']['installation'] \
+ fixed_costs['L2']['lifetime_evse_maint']) / lifetime_residential_energy_kwh
# Calculate single lvl fixed cost for residential charging
lvl_fixed_costs_res = lvl_fixed_costs_l1 * fraction_home_l1_charging + lvl_fixed_costs_l2 * (1-fraction_home_l1_charging)
# Calculate state-level residential LCOC, write to file
df['lcoc_cost_per_kwh'] = df['cost_per_kwh'] + lvl_fixed_costs_res
df = df[['state', 'lcoc_cost_per_kwh']]
df.to_csv(outfile, index=False)
nat_lcoc = round(float(df[df.state=='US']['lcoc_cost_per_kwh']), 2)
print('LCOC calculation complete, national LCOC (residential) is ${}/kWh'.format(nat_lcoc))
###########################
### Workplace/Public L2 ###
###########################
def calculate_state_workplace_public_l2_lcoc(coe_path = config.EIA_COM_PATH,
fixed_costs_file = 'data/fixed-costs/workplace-public-l2/com_level2.txt',
equip_lifespan = 15,
equip_utilization_kwh_per_day = 30, #source: INL
outpath = 'outputs/cost-of-charging/workplace-public-l2/work_pub_l2_states_baseline.csv'):
"""
Function calculates the state-level workplace/public-L2 levelized cost of charging, taking
into account the average cost of electricity, fixed costs, and equipment
maintenance.
"""
# Load data
df = pd.read_csv(coe_path)
fixed_cost_dict = {}
with open(fixed_costs_file) as f:
for line in f:
key, val = line.split(':')
fixed_cost_dict[key] = float(val)
ann_maint_cost = 0.01 * fixed_cost_dict['equipment']
lifetime_maint_cost = ann_maint_cost * equip_lifespan
fixed_cost_dict['lifetime_evse_maint'] = lifetime_maint_cost
# Calculate lifetime energy output
lifetime_evse_energy_kwh = equip_lifespan * 365 * equip_utilization_kwh_per_day
# Calculate lvl fixed costs for commercial charging
lvl_fixed_costs = (fixed_cost_dict['equipment'] + fixed_cost_dict['installation'] \
+ fixed_cost_dict['lifetime_evse_maint']) / lifetime_evse_energy_kwh
# Calculate state-level workplace/public-L2 LCOC, write to file
df['cost'] = df['cost'] / 100
df['lcoc_cost_per_kwh'] = df['cost'] + lvl_fixed_costs
df.rename(columns={'description': 'state'}, inplace=True)
df = df[['state', 'lcoc_cost_per_kwh']]
df.to_csv(outpath, index=False)
nat_lcoc = round(float(df[df.state=='US']['lcoc_cost_per_kwh']), 2)
print('LCOC calculation complete, national LCOC (workplace/pub-L2) is ${}/kWh'.format(nat_lcoc))
####################
### DCFC Station ###
####################
def dcfc_rates_to_utils(urdb_rates_files = config.DCFC_PROFILES_DICT,
outpath = 'outputs/cost-of-electricity/urdb-dcfc-utilities/'):
"""
Aggregates dcfc urdb rates in urdb_rates_files by utility, keeping the minimum
cost of electricity value.
"""
for prof in urdb_rates_files.keys():
rates_df = | pd.read_csv(urdb_rates_files[prof], low_memory=False) | pandas.read_csv |
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Distance Measurement Calculation.py: This file uses the Mahalanobis Distance distance-based #
# matching technique to match donors and recipients #
# #
# Author: <NAME> #
# #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
import pandas as pd
import numpy as np
from scipy.spatial.distance import cdist
xls = pd.ExcelFile('')
# Read in recipients and filter
r = pd.read_excel(xls, 'file_name_recipients')
r = pd.DataFrame(r)
recipients = list(r['respondentid'])
# Read in donors and filter for
d = pd.read_excel(xls, 'file_name_donors')
d = pd.DataFrame(d)
donors = list(d['respondentid'])
#Read in importance and filter for
importance = pd.read_excel(xls, 'file_name_importance')
imp = pd.DataFrame(importance)
imp = imp.sort_values('demo')
#Create diagonal matrix of importance (I)
I = pd.DataFrame.as_matrix(imp["importance"])
I = np.diag(I)
np.savetxt("I_matrix.csv", I, delimiter=",")
#Create X matrix using only variables in I
X = | pd.DataFrame() | pandas.DataFrame |
from package import dataHandler as dh
from package import featureHandler as fh
from sklearn.model_selection import KFold
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
from sklearn.metrics import roc_auc_score,accuracy_score
import random
import numpy as np
import pandas as pd
from sklearn import preprocessing
import itertools
def get_participant_matches(participants):
matches = get_fixed_participant_matches() + get_variable_participant_matches(participants,age_range=5)
return matches
def get_fixed_participant_matches():
"""There are 10 control females and 7 viable PD females. Thus to use most of the data set possible, the 3 of the control females will need to match with 3 PD males
9 of these control females will be a fixed match. 7 control and PD female matches were done so simply by ordering age by ascending order and matching them. The other 3 control females were all older, so this will create matches with the least age difference
Of the 3 older control females, 2 of them will be fix matched with 2 PD males of the closest age. These PD males are not similar in age to any control males, so they would not have been utilised anyway"""
female_matches = [('C010', 'P019'), #53 and 53
('C031', 'P038'), #67 and 57
('C030', 'P021'), #67 and 58
('C028', 'P001'), #69 and 58
('C024', 'P026'), #71 and 62
('C025', 'P027'), #72 and 67
('C014', 'P008')] #74 and 69
mixed_matches = [('C021', 'P002'), #81 and 82
('C032', 'P012')] #94 and 91
return female_matches + mixed_matches
def get_variable_participant_matches(participants, age_range=5):
controls_to_match = participants.loc[['C004','C013','C009','C020','C006','C026']] #C026 is female, everyone else male
viable_matches = dh.df_retrieve(participants,{'is PD': True,'Sex':'Male'})
viable_matches = viable_matches.loc[~viable_matches.index.isin(['P002','P012','P013','P014'])] #exclude these because P002 and P012 matched already with other females, and P013 has weird CoP that results in some features being NaN
#Pair controls with their potential matches
potential_matches_df = | pd.DataFrame(columns=['Possible PD matches','How many']) | pandas.DataFrame |
# coding: utf-8
import pandas as pd
from pandas import Series,DataFrame
import numpy as np
import itertools
import matplotlib.pyplot as plt
get_ipython().magic('matplotlib inline')
from collections import Counter
import re
import datetime as dt
from datetime import date
from datetime import datetime
import requests
from bs4 import BeautifulSoup
import string
import nltk
from nltk.tokenize import word_tokenize, sent_tokenize
from nltk.corpus import stopwords
from string import punctuation
from nltk.tokenize import TweetTokenizer
from nltk import tokenize
from wordcloud import WordCloud
from PIL import Image
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn.manifold import TSNE
from sklearn.pipeline import Pipeline
from sklearn.pipeline import FeatureUnion
from sklearn.cross_validation import cross_val_score
from sklearn.cross_validation import StratifiedKFold
from sklearn.grid_search import GridSearchCV
from sklearn.cluster import MiniBatchKMeans
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import LinearSVC, SVC
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.preprocessing import scale
from sklearn.linear_model import LogisticRegression
from sklearn import model_selection
from sklearn.naive_bayes import MultinomialNB
from sklearn.naive_bayes import BernoulliNB
from sklearn.feature_selection import SelectPercentile
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve, auc,precision_score, accuracy_score, recall_score, f1_score
from scipy import interp
import bokeh.plotting as bp
from bokeh.models import HoverTool, BoxSelectTool
from bokeh.plotting import figure, show, output_notebook
from bokeh.io import push_notebook, show, output_notebook
import bokeh.plotting as bplt
import lda
import pyLDAvis
import pyLDAvis.gensim
import warnings
warnings.filterwarnings("ignore")
import logging
import gensim
from gensim import corpora, models, similarities
from gensim.models.word2vec import Word2Vec
from gensim.models.doc2vec import Doc2Vec,TaggedDocument
from gensim.models.ldamodel import LdaModel
from copy import deepcopy
from pprint import pprint
from keras.models import Sequential
from keras.layers import Dense, Dropout, SimpleRNN, LSTM, Activation
from keras.callbacks import EarlyStopping
from keras.preprocessing import sequence
import pickle
import os
print(os.getcwd())
# Importing tweets from csv into dataframe
# (wczytanie danych tweetow z csv do ramki)
try:
tweets = pd.read_csv('bezrobocie_tweets_15.08.2017.csv', names = ['username', 'date', 'retweets', 'favorites', 'text', 'geo', 'mentions', 'hashtags', 'id', 'permalink'], sep=";",
skiprows=1, encoding='utf-8')
except ValueError as exc:
raise ParseError('CSV parse error - %s', parser_context=None)
print(tweets.head())
tweets.text[1]
# Removing duplicates from dataframe
# (usuniecie duplikatow tweetow z ramki)
#print('before', len(tweets)) # 21069
tweets.drop_duplicates(['text'], inplace=True)
print(len(tweets)) # 20803
# Separating the time variable by hour, day, month and year for further analysis using datetime
# (podzial zmiennej data na godzine, rok, miesiac)
tweets['date'] = pd.to_datetime(tweets['date'])
tweets['hour'] = tweets['date'].apply(lambda x: x.hour)
tweets['month'] = tweets['date'].apply(lambda x: x.month)
tweets['day'] = tweets['date'].apply(lambda x: x.day)
tweets['year'] = tweets['date'].apply(lambda x: x.year)
tweets['length'] = tweets["text"].apply(len)
tweets['num_of_words'] = tweets["text"].str.split().apply(len)
# addding 1 column for counting
# (dodanie 1 kolumny do zliczania)
tweets['dummy_count'] = 1
tweets.head(5)
# Changing date into string
# (zamiana daty na string)
tweets['time_decoded'] = | pd.to_datetime(tweets.date) | pandas.to_datetime |
import pytest
import os
from mapping import util
from pandas.util.testing import assert_frame_equal, assert_series_equal
import pandas as pd
from pandas import Timestamp as TS
import numpy as np
@pytest.fixture
def price_files():
cdir = os.path.dirname(__file__)
path = os.path.join(cdir, 'data/')
files = ["CME-FVU2014.csv", "CME-FVZ2014.csv"]
return [os.path.join(path, f) for f in files]
def assert_dict_of_frames(dict1, dict2):
assert dict1.keys() == dict2.keys()
for key in dict1:
assert_frame_equal(dict1[key], dict2[key])
def test_read_price_data(price_files):
# using default name_func in read_price_data()
df = util.read_price_data(price_files)
dt1 = TS("2014-09-30")
dt2 = TS("2014-10-01")
idx = pd.MultiIndex.from_tuples([(dt1, "CME-FVU2014"),
(dt1, "CME-FVZ2014"),
(dt2, "CME-FVZ2014")],
names=["date", "contract"])
df_exp = pd.DataFrame([119.27344, 118.35938, 118.35938],
index=idx, columns=["Open"])
assert_frame_equal(df, df_exp)
def name_func(fstr):
file_name = os.path.split(fstr)[-1]
name = file_name.split('-')[1].split('.')[0]
return name[-4:] + name[:3]
df = util.read_price_data(price_files, name_func)
dt1 = TS("2014-09-30")
dt2 = TS("2014-10-01")
idx = pd.MultiIndex.from_tuples([(dt1, "2014FVU"), (dt1, "2014FVZ"),
(dt2, "2014FVZ")],
names=["date", "contract"])
df_exp = pd.DataFrame([119.27344, 118.35938, 118.35938],
index=idx, columns=["Open"])
assert_frame_equal(df, df_exp)
def test_calc_rets_one_generic():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
rets = pd.Series([0.1, 0.05, 0.1, 0.8], index=idx)
vals = [1, 0.5, 0.5, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([0.1, 0.075, 0.8],
index=weights.index.levels[0],
columns=['CL1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL2'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, 0.15], [0.075, 0.45], [-0.5, 0.2]],
index=weights.index.levels[0],
columns=['CL1', 'CL2'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics_nans_in_second_generic():
idx = pd.MultiIndex.from_tuples([( | TS('2015-01-03') | pandas.Timestamp |
import numpy as np
import pandas as pd
from datetime import date
### Settings
all_persons = [
101, 102, 103, 104, 105, 106, 107, 211, 212, 213, 214, 215, 216, 217
]
persons_no_task = [0, 0, 0, 0, 0]
nr_of_tasks = 10
nr_of_weeks_new = 54
nr_of_monthly_tasks = 4
hallways = 5 #task number of hallways
### Determine other settings
nr_of_people = len(all_persons)
nr_of_weekly_tasks = nr_of_tasks - nr_of_monthly_tasks
nr_no_task = len(persons_no_task)
persons_no_task_nonzero_ind = []
for n in range(0, len(persons_no_task)):
if persons_no_task[n] != 0:
if persons_no_task[n] not in all_persons:
print('Error: persons_no_task person is not in all_persons')
else:
persons_no_task_nonzero_ind = persons_no_task_nonzero_ind + [
all_persons.index(persons_no_task[n])
]
#%% Preallocate memory
total_task_counter = np.zeros((nr_of_people, ), dtype=float)
task_counters = np.zeros((nr_of_people, nr_of_tasks), dtype=float)
#Example of task_counters table:
# Task 1 Task 2 Task 3
#Person 1 x x x
#Person 2 x x x
tasks = np.full((nr_of_weeks_new, nr_of_tasks), -1, dtype=int)
#Example of tasks table:
# Task 1 Task 2 Task 3
#Week 1 x x x
#Week 2 x x x
current_week_counter = np.zeros((nr_of_people, ), dtype=float)
last_week_counter = np.zeros((nr_of_people, ), dtype=int)
last_2week_counter = np.zeros((nr_of_people, ), dtype=int)
old_noclean_weeks = np.zeros(
(nr_of_people, 1),
dtype=int) #amount of weeks that each person did not clean
datevector = np.zeros((nr_of_weeks_new, 1), dtype=int) #vector for new dates
datevector_str = [] #vector for new dates
first_of_month = np.zeros((nr_of_weeks_new, 1),
dtype=int) #bool if this week of the month
#%% Read old list
file_read = pd.read_csv('task_in.csv', delimiter=';') #table
oldlist_export = np.array(file_read)[:, 0:nr_of_tasks + nr_no_task + 1]
oldlist = oldlist_export[:, 1::].astype(int) #part without dates
#convert to hours from December , 1899 (Excel default) to January 0, 0000
excel_date = date(1899, 12, 30).toordinal() #Start date of Excel numbering
startdate = oldlist_export[-1, 0] + excel_date
print('date = ' + str(date.fromordinal(startdate)))
startdate = startdate + 7 # start first new week
### Set current_week_couter to last old week
last_persons = oldlist[-1, 0:nr_of_tasks]
for current_task in range(0, nr_of_tasks):
# find person that did the current task
roomnr_oldtask = last_persons[current_task]
if roomnr_oldtask != 0 and roomnr_oldtask != -1:
# Find index of person
pers_ind = all_persons.index(roomnr_oldtask)
if current_task == hallways:
current_week_counter[pers_ind] = 0.5
else:
current_week_counter[pers_ind] = 1
### load roomnr_oldtask into task_counters per week
for current_week in range(0, len(oldlist[:, 0])):
for current_task in range(0, nr_of_tasks):
# find person that did the current task
roomnr_oldtask = oldlist[current_week, current_task]
if roomnr_oldtask != 0 and roomnr_oldtask != -1:
# Find index of person
pers_ind = all_persons.index(roomnr_oldtask)
task_counters[pers_ind,
current_task] = task_counters[pers_ind,
current_task] + 1.0
### load room nr of persons that do not have to clean in old_noclean_weeks ...
for current_person_nr in range(0, nr_no_task):
for current_week in range(0, len(oldlist[:, 0])):
roomnr_oldtask = oldlist[current_week, nr_of_tasks + current_person_nr]
if roomnr_oldtask != 0 and roomnr_oldtask != -1:
pers_ind = all_persons.index(roomnr_oldtask)
old_noclean_weeks[pers_ind] = old_noclean_weeks[pers_ind] + 1
### ... and give them compensation points
nr_of_weeks_old = len(oldlist[:, 0])
avg_chance_per_task = np.ones((nr_of_tasks, ), dtype=float)
nr_of_weekly_tasks_done_old = np.sum(task_counters[:, 0])
weekly_chance_old = nr_of_weekly_tasks_done_old / (
nr_of_people * nr_of_weeks_old - np.sum(old_noclean_weeks))
#chance of getting a weekly task
avg_chance_per_task[0:nr_of_weekly_tasks] = weekly_chance_old
nr_of_monthly_tasks_done_old = np.sum(task_counters[:, nr_of_tasks - 1])
monthly_chance_old = nr_of_monthly_tasks_done_old / (
nr_of_people * nr_of_weeks_old - np.sum(old_noclean_weeks))
#chance of getting a monthly task
avg_chance_per_task[nr_of_weekly_tasks:nr_of_tasks] = monthly_chance_old
old_noclean_tasks = old_noclean_weeks * avg_chance_per_task
task_counters = task_counters + old_noclean_tasks #sum all points
total_task_counter_old = np.sum(task_counters, 1)
print('total_task_counter_before= ' + str(total_task_counter_old))
#%% Start calculation
### Fill datevector and find first mondays of the month
t = startdate
for current_week in range(0, nr_of_weeks_new):
datevector[current_week, :] = t
datevector_str = datevector_str + [
date.fromordinal(t).strftime("%d-%m-%Y")
]
first_monday_of_month = date.fromordinal(t).day
if first_monday_of_month <= 7:
first_of_month[current_week] = 1
t = t + 7 #add a week
### Use task_counters to build the tasks table
for current_week in range(0, nr_of_weeks_new):
last_2week_counter = last_week_counter
last_week_counter = current_week_counter
current_week_counter = np.zeros((nr_of_people, ), dtype=float)
for current_task in range(0, nr_of_tasks):
# check if it is hallways because this is the least work so easiest
# to do as task two weeks in a row
# determine if the task has to be done (weekly task or first monday of the month)
if (current_task < nr_of_tasks - nr_of_monthly_tasks) or (
first_of_month[current_week] == 1):
total_task_counter = np.sum(task_counters, 1)
#choose person based on their cleaning score
current_task_counter = np.round(task_counters[:, current_task])
total_task_counter2 = np.round(total_task_counter)
cleaning_score = 18 * current_week_counter + 8 * last_week_counter + 2 * last_2week_counter + 2 * current_task_counter + total_task_counter2
# give people not doing task infinite cleaning score
for n in range(0, len(persons_no_task)):
if persons_no_task[n] != 0:
cleaning_score[all_persons.index(
persons_no_task[n])] = np.inf
# find possibilities
lowest_counter = np.min(cleaning_score)
lowest_ind = np.arange(0, nr_of_people)[
cleaning_score == lowest_counter] #lowest indices
#round cleaning scores because of decimals
random_of_lowest = np.random.randint(
0, len(lowest_ind)) #choose random person
lowest_person = lowest_ind[random_of_lowest]
task_counters[lowest_person,
current_task] = task_counters[lowest_person,
current_task] + 1
current_week_counter[
lowest_person] = current_week_counter[lowest_person] + 1
if current_task == hallways:
task_counters[lowest_person,
current_task] = task_counters[lowest_person,
current_task] - 0.5
current_week_counter[
lowest_person] = current_week_counter[lowest_person] - 0.5
tasks[current_week, current_task] = lowest_person
### update total task counters for people not doing task
total_task_counter = np.sum(task_counters, 1)
total_task_counter_diff = np.sum(total_task_counter) - np.sum(
total_task_counter_old)
tasks_per_pers = total_task_counter_diff / (nr_of_people -
len(persons_no_task_nonzero_ind))
total_task_counter[persons_no_task_nonzero_ind] = total_task_counter[
persons_no_task_nonzero_ind] + tasks_per_pers
print('total_task_counter =' + str(total_task_counter))
# print std deviation
std_deviation = np.std(total_task_counter)
print('std = ' + str(std_deviation))
# convert count of persons back to room numbers
for current_person in range(0, len(all_persons)):
tasks[tasks == current_person] = all_persons[current_person]
#make table with dates, tasks and persons that do not have a task
newlist = np.zeros((nr_of_weeks_new, nr_of_tasks + nr_no_task + 1), dtype=int)
newlist[:, 0] = datevector[:, 0] - excel_date
newlist[:, 1:nr_of_tasks + 1] = tasks
newlist[:, nr_of_tasks + 1:nr_of_tasks + 1 + nr_no_task] = np.ones(
(nr_of_weeks_new, 1), dtype=int) * persons_no_task
colnames = [
'Date', 'Living', 'room', 'Toilets', 'Bathroom', 'Showers', 'Hallways',
'Kit', 'ch', 'en', 'Laundry', 'Notask1', 'Notask2', 'Notask3', 'Notask4',
'Notask5'
]
outtable = pd.DataFrame(
newlist, # values
index=newlist[:, 0], # 1st column as index
columns=colnames) # 1st row as the column names
vector_str = | pd.DataFrame({'Day': datevector_str}) | pandas.DataFrame |
import subprocess
import numpy as np
import pandas as pd
from nicenumber import __version__, getlog
from nicenumber import nicenumber as nn
from pytest import raises
def test_init():
"""Test main package __init__.py"""
# test getlog function works to create logger
log = getlog(__name__)
assert log.name == __name__
# test version strings match
args = ['poetry', 'version', '-s']
toml_ver = subprocess.run(args, capture_output=True, text=True).stdout.rstrip()
assert __version__ == toml_ver
def check_expected_result(func, vals: list):
"""Call function with kw args for each dict in list
Parameters
----------
func : callable
Function to call
vals : list
List of dicts with kw args
"""
for kw, expected_result in vals:
result = func(**kw)
# handle pd.NA without equality
if | pd.isnull(expected_result) | pandas.isnull |
import nltk
from nltk.corpus import stopwords
import pandas as pd
import string
from collections import Counter
from keras.preprocessing.text import Tokenizer
from keras.models import Sequential
from keras.layers import Dense, Dropout
import random
from numpy import array
from pandas import DataFrame
from matplotlib import pyplot
from bag_of_words import clean_doc
nltk.download('stopwords')
# load doc, clean and return line of tokens
def doc_to_line(filename, vocab):
doc = load_doc(filename)
tokens = clean_doc(doc)
# filter by vocab
tokens = [w for w in tokens if w in vocab]
return ' '.join(tokens)
# load doc into memory
def load_doc(filename):
file = open(filename, 'r')
text = file.read()
file.close()
return text
def evaluate_mode(X_train, y_train, X_test, y_test):
scores = list()
n_repeats = 2
n_words = X_test.shape[1]
for i in range(n_repeats):
model = get_model(n_words)
# fit network
model.fit(X_train, y_train, epochs=5, verbose=1)
# evaluate
loss, acc = model.evaluate(X_test, y_test, verbose=1)
scores.append(acc)
print('%d accuracy: %s' % ((i+1), acc))
return scores
def get_model(n_words):
# define network
model = Sequential()
model.add(Dense(50, input_shape=(n_words,), activation='relu'))
model.add(Dense(4, activation='sigmoid'))
# compile network
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
def get_data(data):
# load the vocabulary
vocab_filename = 'data/vocab.txt'
vocab = load_doc(vocab_filename)
vocab = vocab.split()
vocab = set(vocab)
sentences = data['productDisplayName'].values.tolist()
usage = | pd.get_dummies(data['season']) | pandas.get_dummies |
""" I/O functions of the aecg package: tools for annotated ECG HL7 XML files
This module implements helper functions to parse and read annotated
electrocardiograms (ECGs) stored in XML files following the HL7
specification.
See authors, license and disclaimer at the top level directory of this project.
"""
# Imports =====================================================================
from typing import Dict, Tuple
from lxml import etree
from aecg import validate_xpath, new_validation_row, VALICOLS, \
TIME_CODES, SEQUENCE_CODES, \
Aecg, AecgLead, AecgAnnotationSet
import copy
import logging
import pandas as pd
import re
import zipfile
# Python logging ==============================================================
logger = logging.getLogger(__name__)
def parse_annotations(xml_filename: str,
zip_filename: str,
aecg_doc: etree._ElementTree,
aecgannset: AecgAnnotationSet,
path_prefix: str,
annsset_xmlnode_path: str,
valgroup: str = "RHYTHM",
log_validation: bool = False) -> Tuple[
AecgAnnotationSet, pd.DataFrame]:
"""Parses `aecg_doc` XML document and extracts annotations
Args:
xml_filename (str): Filename of the aECG XML file.
        zip_filename (str): Filename of the zip file containing the aECG XML
            file. If '', the XML file is not stored in a zip file.
        aecg_doc (etree._ElementTree): XML document of the aECG XML file.
        aecgannset (AecgAnnotationSet): Annotation set to which found
            annotations are appended.
        path_prefix (str): Prefix of the XML path from which to start
            searching for annotations.
        annsset_xmlnode_path (str): Path to the XML node of the annotation set
            containing the annotations.
        valgroup (str, optional): Indicates whether to search for annotations
            in the rhythm or the derived waveform. Defaults to "RHYTHM".
log_validation (bool, optional): Indicates whether to maintain the
validation results in `aecg.validatorResults`. Defaults to
False.
Returns:
Tuple[AecgAnnotationSet, pd.DataFrame]: Annotation set updated with
found annotations and dataframe with results of validation.
"""
anngrpid = 0
# Annotations stored within a beat
beatnodes = aecg_doc.xpath((
path_prefix +
"/component/annotation/code[@code=\'MDC_ECG_BEAT\']").replace(
'/', '/ns:'), namespaces={'ns': 'urn:hl7-org:v3'})
beatnum = 0
valpd = pd.DataFrame()
if len(beatnodes) > 0:
logger.info(
f'{xml_filename},{zip_filename},'
f'{valgroup} {len(beatnodes)} annotated beats found')
for beatnode in beatnodes:
for rel_path in ["../component/annotation/"
"code[contains(@code, \"MDC_ECG_\")]"]:
annsnodes = beatnode.xpath(rel_path.replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
rel_path2 = "../value"
for annsnode in annsnodes:
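                # Template for one annotation row; the fields are filled in
                # from the annotation code, its value element and the
                # supporting ROI (value/low/high, time encoding and lead).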
ann = {"anngrpid": anngrpid, "beatnum": "", "code": "",
"codetype": "",
"wavecomponent": "", "wavecomponent2": "",
"timecode": "",
"value": "", "value_unit": "",
"low": "", "low_unit": "",
"high": "", "high_unit": "",
"lead": ""}
# Annotation code
valrow2 = validate_xpath(
annsnode,
".",
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + rel_path
if valrow2["VALIOUT"] == "PASSED":
ann["code"] = valrow2["VALUE"]
# Annotation type from top level value
valrow2 = validate_xpath(annsnode,
"../value",
"urn:hl7-org:v3",
"code",
new_validation_row(
xml_filename, valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/value"
if log_validation:
valpd = valpd.append(pd.DataFrame(
[valrow2], columns=VALICOLS), ignore_index=True)
if valrow2["VALIOUT"] == "PASSED":
ann["codetype"] = valrow2["VALUE"]
# Annotations type
valrow2 = validate_xpath(
annsnode,
rel_path2,
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + rel_path + \
"/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["beatnum"] = beatnum
ann["codetype"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
subannsnodes = annsnode.xpath(
rel_path.replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
if len(subannsnodes) == 0:
subannsnodes = [annsnode]
else:
subannsnodes += [annsnode]
# Exclude annotations reporting interval values only
subannsnodes = [
sa for sa in subannsnodes
if not sa.get("code").startswith("MDC_ECG_TIME_PD_")]
for subannsnode in subannsnodes:
# Annotations type
valrow2 = validate_xpath(subannsnode,
rel_path2,
"urn:hl7-org:v3",
"code",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["wavecomponent"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value
valrow2 = validate_xpath(subannsnode,
rel_path2,
"urn:hl7-org:v3",
"value",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value units
valrow2 = validate_xpath(subannsnode,
rel_path2,
"urn:hl7-org:v3",
"unit",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value_unit"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# annotations info from supporting ROI
rel_path3 = "../support/supportingROI/component/"\
"boundary/value"
for n in ["", "low", "high"]:
if n != "":
rp = rel_path3 + "/" + n
else:
rp = rel_path3
valrow3 = validate_xpath(
subannsnode,
rp,
"urn:hl7-org:v3",
"value",
new_validation_row(xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow3["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rp
if valrow3["VALIOUT"] == "PASSED":
if n != "":
ann[n] = valrow3["VALUE"]
else:
ann["value"] = valrow3["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow3], columns=VALICOLS),
ignore_index=True)
valrow3 = validate_xpath(
subannsnode,
rp,
"urn:hl7-org:v3",
"unit",
new_validation_row(xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow3["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rp
if valrow3["VALIOUT"] == "PASSED":
if n != "":
ann[n + "_unit"] = valrow3["VALUE"]
else:
ann["value_unit"] = valrow3["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow3], columns=VALICOLS),
ignore_index=True)
# annotations time encoding, lead and other info used
# by value and supporting ROI
rel_path4 = "../support/supportingROI/component/"\
"boundary/code"
roinodes = subannsnode.xpath(
rel_path4.replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
for roinode in roinodes:
valrow4 = validate_xpath(
roinode,
".",
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow4["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path4
if valrow4["VALIOUT"] == "PASSED":
if valrow4["VALUE"] in ["TIME_ABSOLUTE",
"TIME_RELATIVE"]:
ann["timecode"] = valrow4["VALUE"]
else:
ann["lead"] = valrow4["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow4], columns=VALICOLS),
ignore_index=True)
aecgannset.anns.append(copy.deepcopy(ann))
else:
# Annotations type
valrow2 = validate_xpath(annsnode,
".",
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename,
valgroup,
"ANNSET_BEAT_"
"ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + rel_path +\
"/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["beatnum"] = beatnum
ann["codetype"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value
valrow2 = validate_xpath(annsnode,
rel_path2,
"urn:hl7-org:v3",
"value",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value units
valrow2 = validate_xpath(annsnode,
rel_path2,
"urn:hl7-org:v3",
"unit",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value_unit"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# annotations time encoding, lead and other info used
# by value and supporting ROI
rel_path4 = "../support/supportingROI/component/" \
"boundary/code"
roinodes = annsnode.xpath(
rel_path4.replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
for roinode in roinodes:
valrow4 = validate_xpath(roinode,
".",
"urn:hl7-org:v3",
"code",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_BEAT_ANNS"),
failcat="WARNING")
valrow4["XPATH"] = annsset_xmlnode_path + "/" + \
rel_path + "/" + rel_path4
if valrow4["VALIOUT"] == "PASSED":
if valrow4["VALUE"] in ["TIME_ABSOLUTE",
"TIME_RELATIVE"]:
ann["timecode"] = valrow4["VALUE"]
else:
ann["lead"] = valrow4["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow4],
columns=VALICOLS),
ignore_index=True)
aecgannset.anns.append(copy.deepcopy(ann))
else:
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
anngrpid = anngrpid + 1
beatnum = beatnum + 1
if len(beatnodes) > 0:
logger.info(
f'{xml_filename},{zip_filename},'
f'{valgroup} {beatnum} annotated beats and {anngrpid} '
            f'annotation groups found')
anngrpid_from_beats = anngrpid
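    # Remember how many annotation groups came from beats so that the number
    # of groups without an associated beat can be reported after the second
    # pass.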
# Annotations stored without an associated beat
for codetype_path in ["/component/annotation/code["
"(contains(@code, \"MDC_ECG_\") and"
" not (@code=\'MDC_ECG_BEAT\'))]"]:
annsnodes = aecg_doc.xpath(
(path_prefix + codetype_path).replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
rel_path2 = "../value"
for annsnode in annsnodes:
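            # Same annotation template as in the beat pass, except that
            # beatnum stays empty because these annotations are not tied to a
            # beat.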
ann = {"anngrpid": anngrpid, "beatnum": "", "code": "",
"codetype": "",
"wavecomponent": "", "wavecomponent2": "",
"timecode": "",
"value": "", "value_unit": "",
"low": "", "low_unit": "",
"high": "", "high_unit": "",
"lead": ""}
# Annotations code
valrow2 = validate_xpath(annsnode,
".",
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename, valgroup,
"ANNSET_NOBEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
if valrow2["VALIOUT"] == "PASSED":
ann["code"] = valrow2["VALUE"]
# Annotation type from top level value
valrow2 = validate_xpath(annsnode,
"../value",
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename, valgroup,
"ANNSET_NOBEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/value"
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
if valrow2["VALIOUT"] == "PASSED":
ann["codetype"] = valrow2["VALUE"]
subannsnodes = annsnode.xpath(
(".." + codetype_path).replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
if len(subannsnodes) == 0:
subannsnodes = [annsnode]
for subannsnode in subannsnodes:
subsubannsnodes = subannsnode.xpath(
(".." + codetype_path).replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
tmpnodes = [subannsnode]
if len(subsubannsnodes) > 0:
tmpnodes = tmpnodes + subsubannsnodes
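                # Scan the annotation node itself plus any annotation nodes
                # nested under it with the same extraction logic.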
for subsubannsnode in tmpnodes:
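                    # Reset the measurement-related fields for this node;
                    # anngrpid, code and codetype carry over from the
                    # enclosing annotation group.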
ann["wavecomponent"] = ""
ann["wavecomponent2"] = ""
ann["timecode"] = ""
ann["value"] = ""
ann["value_unit"] = ""
ann["low"] = ""
ann["low_unit"] = ""
ann["high"] = ""
ann["high_unit"] = ""
roi_base = "../support/supportingROI/component/boundary"
rel_path3 = roi_base + "/value"
valrow2 = validate_xpath(
subsubannsnode,
".",
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename,
valgroup,
"ANNSET_NOBEAT_"
"ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/code"
if valrow2["VALIOUT"] == "PASSED":
if not ann["codetype"].endswith("WAVE"):
ann["codetype"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations type
valrow2 = validate_xpath(
subsubannsnode,
rel_path2,
"urn:hl7-org:v3",
"code",
new_validation_row(xml_filename,
valgroup,
"ANNSET_NOBEAT_"
"ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["wavecomponent"] = valrow2["VALUE"]
# if ann["wavecomponent"] == "":
# ann["wavecomponent"] = valrow2["VALUE"]
# else:
# ann["wavecomponent2"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value
valrow2 = validate_xpath(
subsubannsnode,
rel_path2,
"urn:hl7-org:v3",
"",
new_validation_row(xml_filename,
valgroup,
"ANNSET_NOBEAT_"
"ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value as attribute
valrow2 = validate_xpath(
subsubannsnode,
rel_path2,
"urn:hl7-org:v3",
"value",
new_validation_row(xml_filename,
valgroup,
"ANNSET_NOBEAT_"
"ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Annotations value units
valrow2 = validate_xpath(
subsubannsnode,
rel_path2,
"urn:hl7-org:v3",
"unit",
new_validation_row(xml_filename,
valgroup,
"ANNSET_NOBEAT_"
"ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rel_path2
if valrow2["VALIOUT"] == "PASSED":
ann["value_unit"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# annotations info from supporting ROI
for n in ["", "low", "high"]:
if n != "":
rp = rel_path3 + "/" + n
else:
rp = rel_path3
valrow3 = validate_xpath(
subsubannsnode,
rp,
"urn:hl7-org:v3",
"value",
new_validation_row(xml_filename,
valgroup,
"ANNSET_NOBEAT_"
"ANNS"),
failcat="WARNING")
valrow3["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rp
if valrow3["VALIOUT"] == "PASSED":
if n != "":
ann[n] = valrow3["VALUE"]
else:
ann["value"] = valrow3["VALUE"]
else:
roi_base = "../component/annotation/support/"\
"supportingROI/component/boundary"
# Annotations type
valrow2 = validate_xpath(subsubannsnode,
"../component/annotation/"
"value",
"urn:hl7-org:v3",
"code",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_NOBEAT_ANNS"),
failcat="WARNING")
valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + \
"../component/annotation/value"
if valrow2["VALIOUT"] == "PASSED":
ann["wavecomponent2"] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# annotation values
if n != "":
rp = roi_base + "/value/" + n
else:
rp = roi_base + "/value"
valrow3 = validate_xpath(subsubannsnode,
rp,
"urn:hl7-org:v3",
"value",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_NOBEAT_ANNS"),
failcat="WARNING")
valrow3["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rp
if valrow3["VALIOUT"] == "PASSED":
if n != "":
ann[n] = valrow3["VALUE"]
else:
ann["value"] = valrow3["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow3], columns=VALICOLS),
ignore_index=True)
valrow3 = validate_xpath(
subsubannsnode,
rp,
"urn:hl7-org:v3",
"unit",
new_validation_row(xml_filename,
valgroup,
"ANNSET_NOBEAT"
"_ANNS"),
failcat="WARNING")
valrow3["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rp
if valrow3["VALIOUT"] == "PASSED":
if n != "":
ann[n + "_unit"] = valrow3["VALUE"]
else:
ann["value_unit"] = valrow3["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow3], columns=VALICOLS),
ignore_index=True)
# annotations time encoding, lead and other info used by
# value and supporting ROI
for rel_path4 in ["../support/supportingROI/component/"
"boundary",
"../component/annotation/support/"
"supportingROI/component/boundary"]:
roinodes = subsubannsnode.xpath(
rel_path4.replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
for roinode in roinodes:
valrow4 = validate_xpath(roinode,
"./code",
"urn:hl7-org:v3",
"code",
new_validation_row(
xml_filename,
valgroup,
"ANNSET_NOBEAT_ANNS"),
failcat="WARNING")
valrow4["XPATH"] = annsset_xmlnode_path + "/.." + \
codetype_path + "/" + rel_path4
if valrow4["VALIOUT"] == "PASSED":
if valrow4["VALUE"] in ["TIME_ABSOLUTE",
"TIME_RELATIVE"]:
ann["timecode"] = valrow4["VALUE"]
else:
ann["lead"] = valrow4["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow4], columns=VALICOLS),
ignore_index=True)
aecgannset.anns.append(copy.deepcopy(ann))
anngrpid = anngrpid + 1
logger.info(
f'{xml_filename},{zip_filename},'
        f'{valgroup} {anngrpid-anngrpid_from_beats} annotation groups'
f' without an associated beat found')
return aecgannset, valpd
def parse_generalinfo(aecg_doc: etree._ElementTree,
aecg: Aecg,
log_validation: bool = False) -> Aecg:
"""Parses `aecg_doc` XML document and extracts general information
    This function parses the `aecg_doc` XML document searching for general
    information that it includes in the returned `Aecg`: the unique identifier
    (UUID), the ECG date and time of collection (EGDTC), and device
    information.
Args:
aecg_doc (etree._ElementTree): aECG XML document
aecg (Aecg): The aECG object to update
log_validation (bool, optional): Indicates whether to maintain the
validation results in `aecg.validatorResults`. Defaults to
False.
Returns:
Aecg: `aecg` updated with the information found in the xml document.
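    Example:
        Minimal usage sketch (illustrative only; it assumes an aECG file named
        "example.xml" exists on disk and that `Aecg()` can be constructed with
        its defaults):
            aecg_doc = etree.parse("example.xml")
            aecg = Aecg()
            aecg.filename = "example.xml"
            aecg = parse_generalinfo(aecg_doc, aecg, log_validation=True)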
"""
# =======================================
# UUID
# =======================================
valrow = validate_xpath(aecg_doc,
"./*[local-name() = \"id\"]",
"",
"root",
new_validation_row(aecg.filename,
"GENERAL",
"UUID"))
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'UUID found: {valrow["VALUE"]}')
aecg.UUID = valrow["VALUE"]
else:
logger.critical(
f'{aecg.filename},{aecg.zipContainer},'
f'UUID not found')
valrow = validate_xpath(aecg_doc,
"./*[local-name() = \"id\"]",
"",
"extension",
new_validation_row(aecg.filename,
"GENERAL",
"UUID"))
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
if valrow["VALIOUT"] == "PASSED":
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'UUID extension found: {valrow["VALUE"]}')
aecg.UUID += valrow["VALUE"]
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'UUID updated to: {aecg.UUID}')
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'UUID extension not found')
# =======================================
# EGDTC
# =======================================
valpd = pd.DataFrame()
egdtc_found = False
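    # The effective time may carry an interval (low/high) and/or a center
    # timestamp; any of the three components found counts as an ECG date/time
    # of collection (EGDTC).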
for n in ["low", "center", "high"]:
valrow = validate_xpath(aecg_doc,
"./*[local-name() = \"effectiveTime\"]/"
"*[local-name() = \"" + n + "\"]",
"",
"value",
new_validation_row(aecg.filename, "GENERAL",
"EGDTC_" + n),
"WARNING")
if valrow["VALIOUT"] == "PASSED":
egdtc_found = True
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'EGDTC {n} found: {valrow["VALUE"]}')
aecg.EGDTC[n] = valrow["VALUE"]
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
ignore_index=True)
if not egdtc_found:
logger.critical(
f'{aecg.filename},{aecg.zipContainer},'
f'EGDTC not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(valpd,
ignore_index=True)
# =======================================
# DEVICE
# =======================================
# DEVICE = {"manufacturer": "", "model": "", "software": ""}
valrow = validate_xpath(aecg_doc,
"./component/series/author/"
"seriesAuthor/manufacturerOrganization/name",
"urn:hl7-org:v3",
"",
new_validation_row(aecg.filename, "GENERAL",
"DEVICE_manufacturer"),
"WARNING")
if valrow["VALIOUT"] == "PASSED":
tmp = valrow["VALUE"].replace("\n", "|")
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DEVICE manufacturer found: {tmp}')
aecg.DEVICE["manufacturer"] = valrow["VALUE"]
else:
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'DEVICE manufacturer not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
valrow = validate_xpath(aecg_doc,
"./component/series/author/"
"seriesAuthor/manufacturedSeriesDevice/"
"manufacturerModelName",
"urn:hl7-org:v3",
"",
new_validation_row(aecg.filename, "GENERAL",
"DEVICE_model"),
"WARNING")
if valrow["VALIOUT"] == "PASSED":
tmp = valrow["VALUE"].replace("\n", "|")
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DEVICE model found: {tmp}')
aecg.DEVICE["model"] = valrow["VALUE"]
else:
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'DEVICE model not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
valrow = validate_xpath(aecg_doc,
"./component/series/author/"
"seriesAuthor/manufacturedSeriesDevice/"
"softwareName",
"urn:hl7-org:v3",
"",
new_validation_row(aecg.filename, "GENERAL",
"DEVICE_software"),
"WARNING")
if valrow["VALIOUT"] == "PASSED":
tmp = valrow["VALUE"].replace("\n", "|")
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DEVICE software found: {tmp}')
aecg.DEVICE["software"] = valrow["VALUE"]
else:
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'DEVICE software not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
return aecg
def parse_subjectinfo(aecg_doc: etree._ElementTree,
aecg: Aecg,
log_validation: bool = False) -> Aecg:
"""Parses `aecg_doc` XML document and extracts subject information
    This function parses the `aecg_doc` XML document searching for subject
    information that it includes in the returned `Aecg`: the subject unique
    identifier (USUBJID), gender, birth time, and race.
Args:
aecg_doc (etree._ElementTree): aECG XML document
aecg (Aecg): The aECG object to update
log_validation (bool, optional): Indicates whether to maintain the
validation results in `aecg.validatorResults`. Defaults to
False.
Returns:
Aecg: `aecg` updated with the information found in the xml document.
"""
# =======================================
# USUBJID
# =======================================
valpd = pd.DataFrame()
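    # The subject id is an HL7 instance identifier (II): "root" holds the
    # OID/UUID namespace and "extension" the identifier within it; both parts
    # are captured when present.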
for n in ["root", "extension"]:
valrow = validate_xpath(aecg_doc,
"./componentOf/timepointEvent/componentOf/"
"subjectAssignment/subject/trialSubject/id",
"urn:hl7-org:v3",
n,
new_validation_row(aecg.filename,
"SUBJECTINFO",
"USUBJID_" + n))
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DM.USUBJID ID {n} found: {valrow["VALUE"]}')
aecg.USUBJID[n] = valrow["VALUE"]
        else:
            logger.warning(
                f'{aecg.filename},{aecg.zipContainer},'
                f'DM.USUBJID ID {n} not found')
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
ignore_index=True)
if (aecg.USUBJID["root"] == "") and (aecg.USUBJID["extension"] == ""):
logger.error(
f'{aecg.filename},{aecg.zipContainer},'
f'DM.USUBJID cannot be established.')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(valpd,
ignore_index=True)
# =======================================
# SEX / GENDER
# =======================================
valrow = validate_xpath(aecg_doc,
"./componentOf/timepointEvent/componentOf/"
"subjectAssignment/subject/trialSubject/"
"subjectDemographicPerson/"
"administrativeGenderCode",
"urn:hl7-org:v3",
"code",
new_validation_row(aecg.filename, "SUBJECTINFO",
"SEX"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DM.SEX found: {valrow["VALUE"]}')
aecg.SEX = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'DM.SEX not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
# =======================================
# BIRTHTIME
# =======================================
valrow = validate_xpath(aecg_doc,
"./componentOf/timepointEvent/componentOf/"
"subjectAssignment/subject/trialSubject/"
"subjectDemographicPerson/birthTime",
"urn:hl7-org:v3",
"value",
new_validation_row(aecg.filename, "SUBJECTINFO",
"BIRTHTIME"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DM.BIRTHTIME found.')
aecg.BIRTHTIME = valrow["VALUE"]
# age_in_years = aecg.subject_age_in_years()
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'DM.BIRTHTIME not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
# =======================================
# RACE
# =======================================
valrow = validate_xpath(aecg_doc,
"./componentOf/timepointEvent/componentOf/"
"subjectAssignment/subject/trialSubject/"
"subjectDemographicPerson/raceCode",
"urn:hl7-org:v3",
"code",
new_validation_row(aecg.filename, "SUBJECTINFO",
"RACE"),
failcat="WARNING")
    if valrow["VALIOUT"] == "PASSED":
        logger.info(
            f'{aecg.filename},{aecg.zipContainer},'
            f'DM.RACE found: {valrow["VALUE"]}')
        aecg.RACE = valrow["VALUE"]
    else:
        logger.debug(
            f'{aecg.filename},{aecg.zipContainer},'
            f'DM.RACE not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
return aecg
def parse_trtainfo(aecg_doc: etree._ElementTree,
aecg: Aecg,
log_validation: bool = False) -> Aecg:
"""Parses `aecg_doc` XML document and extracts subject information
This function parses the `aecg_doc` xml document searching for treatment
information that includes in the returned `Aecg`.
Args:
aecg_doc (etree._ElementTree): aECG XML document
aecg (Aecg): The aECG object to update
log_validation (bool, optional): Indicates whether to maintain the
validation results in `aecg.validatorResults`. Defaults to
False.
Returns:
Aecg: `aecg` updated with the information found in the xml document.
"""
valrow = validate_xpath(aecg_doc,
"./componentOf/timepointEvent/componentOf/"
"subjectAssignment/definition/"
"treatmentGroupAssignment/code",
"urn:hl7-org:v3",
"code",
new_validation_row(aecg.filename, "STUDYINFO",
"TRTA"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'TRTA information found: {valrow["VALUE"]}')
aecg.TRTA = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'TRTA information not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
return aecg
def parse_studyinfo(aecg_doc: etree._ElementTree,
aecg: Aecg,
log_validation: bool = False) -> Aecg:
"""Parses `aecg_doc` XML document and extracts study information
This function parses the `aecg_doc` xml document searching for study
    information that it includes in the returned `Aecg`: study unique identifier
(STUDYID), and study title.
Args:
aecg_doc (etree._ElementTree): aECG XML document
aecg (Aecg): The aECG object to update
log_validation (bool, optional): Indicates whether to maintain the
validation results in `aecg.validatorResults`. Defaults to
False.
Returns:
Aecg: `aecg` updated with the information found in the xml document.
"""
valpd = pd.DataFrame()
for n in ["root", "extension"]:
valrow = validate_xpath(aecg_doc,
"./componentOf/timepointEvent/componentOf/"
"subjectAssignment/componentOf/"
"clinicalTrial/id",
"urn:hl7-org:v3",
n,
new_validation_row(aecg.filename,
"STUDYINFO",
"STUDYID_" + n),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'STUDYID {n} found: {valrow["VALUE"]}')
aecg.STUDYID[n] = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'STUDYID {n} not found')
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
ignore_index=True)
if log_validation:
aecg.validatorResults = \
aecg.validatorResults.append(valpd, ignore_index=True)
valrow = validate_xpath(aecg_doc,
"./componentOf/timepointEvent/componentOf/"
"subjectAssignment/componentOf/"
"clinicalTrial/title",
"urn:hl7-org:v3",
"",
new_validation_row(aecg.filename, "STUDYINFO",
"STUDYTITLE"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
tmp = valrow["VALUE"].replace("\n", "")
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'STUDYTITLE found: {tmp}')
aecg.STUDYTITLE = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'STUDYTITLE not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
return aecg
def parse_timepoints(aecg_doc: etree._ElementTree,
aecg: Aecg,
log_validation: bool = False) -> Aecg:
"""Parses `aecg_doc` XML document and extracts timepoints information
This function parses the `aecg_doc` xml document searching for timepoints
    information that it includes in the returned `Aecg`: absolute timepoint or
study event information (TPT), relative timepoint or study event relative
to a reference event (RTPT), and protocol timepoint information (PTPT).
Args:
aecg_doc (etree._ElementTree): aECG XML document
aecg (Aecg): The aECG object to update
log_validation (bool, optional): Indicates whether to maintain the
validation results in `aecg.validatorResults`. Defaults to
False.
Returns:
Aecg: `aecg` updated with the information found in the xml document.
"""
# =======================================
# TPT
# =======================================
valpd = pd.DataFrame()
for n in ["code", "displayName"]:
valrow = validate_xpath(aecg_doc,
"./componentOf/timepointEvent/code",
"urn:hl7-org:v3",
n,
new_validation_row(aecg.filename,
"STUDYINFO",
"TPT_" + n),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'TPT {n} found: {valrow["VALUE"]}')
aecg.TPT[n] = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'TPT {n} not found')
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
ignore_index=True)
if log_validation:
aecg.validatorResults = \
aecg.validatorResults.append(valpd, ignore_index=True)
valrow = validate_xpath(aecg_doc,
"./componentOf/timepointEvent/reasonCode",
"urn:hl7-org:v3",
"code",
new_validation_row(aecg.filename, "STUDYINFO",
"TPT_reasonCode"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'TPT reasonCode found: {valrow["VALUE"]}')
aecg.TPT["reasonCode"] = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'TPT reasonCode not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
valpd = pd.DataFrame()
for n in ["low", "high"]:
valrow = validate_xpath(aecg_doc,
"./componentOf/timepointEvent/"
"effectiveTime/" + n,
"urn:hl7-org:v3",
"value",
new_validation_row(aecg.filename,
"STUDYINFO",
"TPT_" + n),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'TPT {n} found: {valrow["VALUE"]}')
aecg.TPT[n] = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'TPT {n} not found')
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
ignore_index=True)
if log_validation:
aecg.validatorResults = \
aecg.validatorResults.append(valpd, ignore_index=True)
# =======================================
# RTPT
# =======================================
valpd = pd.DataFrame()
for n in ["code", "displayName"]:
valrow = validate_xpath(aecg_doc,
"./definition/relativeTimepoint/code",
"urn:hl7-org:v3",
"code",
new_validation_row(aecg.filename,
"STUDYINFO",
"RTPT_" + n),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'RTPT {n} found: {valrow["VALUE"]}')
aecg.RTPT[n] = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'RTPT {n} not found')
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
ignore_index=True)
if log_validation:
aecg.validatorResults = \
aecg.validatorResults.append(valpd, ignore_index=True)
valrow = validate_xpath(aecg_doc,
"./definition/relativeTimepoint/componentOf/"
"pauseQuantity",
"urn:hl7-org:v3",
"value",
new_validation_row(aecg.filename, "STUDYINFO",
"RTPT_pauseQuantity"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'RTPT pauseQuantity value found: {valrow["VALUE"]}')
aecg.RTPT["pauseQuantity"] = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'RTPT pauseQuantity value not found')
if log_validation:
aecg.validatorResults = \
aecg.validatorResults.append(pd.DataFrame([valrow],
columns=VALICOLS),
ignore_index=True)
valrow = validate_xpath(aecg_doc,
"./definition/relativeTimepoint/componentOf/"
"pauseQuantity",
"urn:hl7-org:v3",
"unit",
new_validation_row(aecg.filename, "STUDYINFO",
"RTPT_pauseQuantity_unit"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'RTPT pauseQuantity unit found: {valrow["VALUE"]}')
aecg.RTPT["pauseQuantity_unit"] = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'RTPT pauseQuantity unit not found')
if log_validation:
aecg.validatorResults = \
aecg.validatorResults.append(pd.DataFrame([valrow],
columns=VALICOLS),
ignore_index=True)
# =======================================
# PTPT
# =======================================
valpd = pd.DataFrame()
for n in ["code", "displayName"]:
valrow = validate_xpath(aecg_doc,
"./definition/relativeTimepoint/"
"componentOf/protocolTimepointEvent/code",
"urn:hl7-org:v3",
n,
new_validation_row(aecg.filename,
"STUDYINFO",
"PTPT_" + n),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'PTPT {n} found: {valrow["VALUE"]}')
aecg.PTPT[n] = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'PTPT {n} not found')
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
ignore_index=True)
if log_validation:
aecg.validatorResults = \
aecg.validatorResults.append(valpd, ignore_index=True)
valrow = validate_xpath(aecg_doc,
"./definition/relativeTimepoint/componentOf/"
"protocolTimepointEvent/component/"
"referenceEvent/code",
"urn:hl7-org:v3",
"code",
new_validation_row(aecg.filename, "STUDYINFO",
"PTPT_referenceEvent"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'PTPT referenceEvent code found: {valrow["VALUE"]}')
aecg.PTPT["referenceEvent"] = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'PTPT referenceEvent code not found')
if log_validation:
aecg.validatorResults = \
aecg.validatorResults.append(pd.DataFrame([valrow],
columns=VALICOLS),
ignore_index=True)
valrow = validate_xpath(aecg_doc,
"./definition/relativeTimepoint/componentOf/"
"protocolTimepointEvent/component/"
"referenceEvent/code",
"urn:hl7-org:v3",
"displayName",
new_validation_row(aecg.filename, "STUDYINFO",
"PTPT_referenceEvent_"
"displayName"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'PTPT referenceEvent displayName found: '
f'{valrow["VALUE"]}')
aecg.PTPT["referenceEvent_displayName"] = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'PTPT referenceEvent displayName not found')
if log_validation:
aecg.validatorResults = \
aecg.validatorResults.append(pd.DataFrame([valrow],
columns=VALICOLS),
ignore_index=True)
return aecg
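# The loops above fill dict-like attributes on `aecg`: aecg.TPT can receive
# "code", "displayName", "reasonCode", "low" and "high" entries, aecg.RTPT the
# "code"/"displayName" plus "pauseQuantity" and "pauseQuantity_unit" entries,
# and aecg.PTPT additionally the reference event code and displayName; each
# entry is assigned only when its node passes validation.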
def parse_rhythm_waveform_info(aecg_doc: etree._ElementTree,
aecg: Aecg,
log_validation: bool = False) -> Aecg:
"""Parses `aecg_doc` XML document and extracts rhythm waveform information
This function parses the `aecg_doc` xml document searching for rhythm
    waveform information that it includes in the returned `Aecg`: waveform
identifier, code, display name, and date and time of collection.
Args:
aecg_doc (etree._ElementTree): aECG XML document
aecg (Aecg): The aECG object to update
log_validation (bool, optional): Indicates whether to maintain the
validation results in `aecg.validatorResults`. Defaults to
False.
Returns:
Aecg: `aecg` updated with the information found in the xml document.
"""
valpd = pd.DataFrame()
for n in ["root", "extension"]:
valrow = validate_xpath(aecg_doc,
"./component/series/id",
"urn:hl7-org:v3",
n,
new_validation_row(aecg.filename, "RHYTHM",
"ID_" + n),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM ID {n} found: {valrow["VALUE"]}')
aecg.RHYTHMID[n] = valrow["VALUE"]
else:
if n == "root":
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM ID {n} not found')
else:
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM ID {n} not found')
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
ignore_index=True)
if log_validation:
aecg.validatorResults = \
aecg.validatorResults.append(valpd, ignore_index=True)
valrow = validate_xpath(aecg_doc,
"./component/series/code",
"urn:hl7-org:v3",
"code",
new_validation_row(aecg.filename, "RHYTHM",
"CODE"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM code found: {valrow["VALUE"]}')
aecg.RHYTHMCODE["code"] = valrow["VALUE"]
if aecg.RHYTHMCODE["code"] != "RHYTHM":
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM unexpected code found: {valrow["VALUE"]}')
valrow["VALIOUT"] = "WARNING"
valrow["VALIMSG"] = "Unexpected value found"
else:
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM code not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
valrow = validate_xpath(aecg_doc,
"./component/series/code",
"urn:hl7-org:v3",
"displayName",
new_validation_row(aecg.filename, "RHYTHM",
"CODE_displayName"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM displayName found: {valrow["VALUE"]}')
aecg.RHYTHMCODE["displayName"] = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM displayName not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
valpd = pd.DataFrame()
for n in ["low", "high"]:
valrow = validate_xpath(aecg_doc,
"./component/series/effectiveTime/" + n,
"urn:hl7-org:v3",
"value",
new_validation_row(aecg.filename, "RHYTHM",
"EGDTC_" + n),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHMEGDTC {n} found: {valrow["VALUE"]}')
aecg.RHYTHMEGDTC[n] = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHMEGDTC {n} not found')
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
ignore_index=True)
if log_validation:
aecg.validatorResults = \
aecg.validatorResults.append(valpd, ignore_index=True)
return aecg
def parse_derived_waveform_info(aecg_doc: etree._ElementTree,
aecg: Aecg,
log_validation: bool = False) -> Aecg:
"""Parses `aecg_doc` XML document and extracts derived waveform information
This function parses the `aecg_doc` xml document searching for derived
    waveform information that it includes in the returned `Aecg`: waveform
identifier, code, display name, and date and time of collection.
Args:
aecg_doc (etree._ElementTree): aECG XML document
aecg (Aecg): The aECG object to update
log_validation (bool, optional): Indicates whether to maintain the
validation results in `aecg.validatorResults`. Defaults to
False.
Returns:
Aecg: `aecg` updated with the information found in the xml document.
"""
valpd = pd.DataFrame()
for n in ["root", "extension"]:
valrow = validate_xpath(aecg_doc,
"./component/series/derivation/"
"derivedSeries/id",
"urn:hl7-org:v3",
n,
new_validation_row(aecg.filename, "DERIVED",
"ID_" + n),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DERIVED ID {n} found: {valrow["VALUE"]}')
aecg.DERIVEDID[n] = valrow["VALUE"]
else:
if n == "root":
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'DERIVED ID {n} not found')
else:
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'DERIVED ID {n} not found')
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
ignore_index=True)
if log_validation:
aecg.validatorResults = \
aecg.validatorResults.append(valpd, ignore_index=True)
valrow = validate_xpath(aecg_doc,
"./component/series/derivation/"
"derivedSeries/code",
"urn:hl7-org:v3",
"code",
new_validation_row(aecg.filename, "DERIVED",
"CODE"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'DERIVED code found: {valrow["VALUE"]}')
aecg.DERIVEDCODE["code"] = valrow["VALUE"]
if aecg.DERIVEDCODE["code"] != "REPRESENTATIVE_BEAT":
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'DERIVED unexpected code found: {valrow["VALUE"]}')
valrow["VALIOUT"] = "WARNING"
valrow["VALIMSG"] = "Unexpected value found"
else:
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'DERIVED code not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
valrow = validate_xpath(aecg_doc,
"./component/series/derivation/"
"derivedSeries/code",
"urn:hl7-org:v3",
"displayName",
new_validation_row(aecg.filename, "DERIVED",
"CODE_displayName"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'DERIVED displayName found: {valrow["VALUE"]}')
aecg.DERIVEDCODE["displayName"] = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'DERIVED displayName not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
valpd = pd.DataFrame()
for n in ["low", "high"]:
valrow = validate_xpath(aecg_doc,
"./component/series/derivation/"
"derivedSeries/effectiveTime/" + n,
"urn:hl7-org:v3",
"value",
new_validation_row(aecg.filename, "DERIVED",
"EGDTC_" + n),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DERIVEDEGDTC {n} found: {valrow["VALUE"]}')
aecg.DERIVEDEGDTC[n] = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'DERIVEDEGDTC {n} not found')
if log_validation:
valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
ignore_index=True)
if log_validation:
aecg.validatorResults = \
aecg.validatorResults.append(valpd, ignore_index=True)
return aecg
def parse_rhythm_waveform_timeseries(aecg_doc: etree._ElementTree,
aecg: Aecg,
include_digits: bool = False,
log_validation: bool = False) -> Aecg:
"""Parses `aecg_doc` XML document and extracts rhythm's timeseries
This function parses the `aecg_doc` xml document searching for rhythm
    waveform timeseries (sequences) information that it includes in the returned
:any:`Aecg`. Each found sequence is stored as an :any:`AecgLead` in the
:any:`Aecg.RHYTHMLEADS` list of the returned :any:`Aecg`.
Args:
aecg_doc (etree._ElementTree): aECG XML document
aecg (Aecg): The aECG object to update
include_digits (bool, optional): Indicates whether to include the
digits information in the returned `Aecg`.
log_validation (bool, optional): Indicates whether to maintain the
validation results in `aecg.validatorResults`. Defaults to
False.
Returns:
Aecg: `aecg` updated with the information found in the xml document.
"""
path_prefix = './component/series/component/sequenceSet/' \
'component/sequence'
seqnodes = aecg_doc.xpath((path_prefix + '/code').replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
if len(seqnodes) > 0:
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM sequenceSet(s) found: '
f'{len(seqnodes)} sequenceSet nodes')
else:
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM sequenceSet not found')
for xmlnode in seqnodes:
xmlnode_path = aecg_doc.getpath(xmlnode)
valrow = validate_xpath(aecg_doc,
xmlnode_path,
"urn:hl7-org:v3",
"code",
new_validation_row(aecg.filename, "RHYTHM",
"SEQUENCE_CODE"),
failcat="WARNING")
valpd = pd.DataFrame()
if valrow["VALIOUT"] == "PASSED":
if not valrow["VALUE"] in SEQUENCE_CODES:
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM unexpected sequenceSet code '
f'found: {valrow["VALUE"]}')
valrow["VALIOUT"] = "WARNING"
valrow["VALIMSG"] = "Unexpected sequence code found"
if valrow["VALUE"] in TIME_CODES:
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM sequenceSet code found: {valrow["VALUE"]}')
aecg.RHYTHMTIME["code"] = valrow["VALUE"]
# Retrieve time head info from value node
rel_path = "../value/head"
valrow2 = validate_xpath(
xmlnode,
rel_path,
"urn:hl7-org:v3",
"value",
new_validation_row(
aecg.filename, "RHYTHM", "SEQUENCE_TIME_HEAD"),
failcat="WARNING")
valrow2["XPATH"] = xmlnode_path + "/" + rel_path
if valrow2["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM SEQUENCE_TIME_HEAD found: {valrow2["VALUE"]}')
aecg.RHYTHMTIME["head"] = valrow2["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM SEQUENCE_TIME_HEAD not found')
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Retrieve time increment info from value node
rel_path = "../value/increment"
for n in ["value", "unit"]:
valrow2 = validate_xpath(
xmlnode,
rel_path,
"urn:hl7-org:v3",
n,
new_validation_row(
aecg.filename, "RHYTHM", "SEQUENCE_TIME_" + n),
failcat="WARNING")
valrow2["XPATH"] = xmlnode_path + "/" + rel_path
if valrow2["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM SEQUENCE_TIME_{n} found: '
f'{valrow2["VALUE"]}')
if n == "value":
aecg.RHYTHMTIME["increment"] = float(
valrow2["VALUE"])
else:
aecg.RHYTHMTIME[n] = valrow2["VALUE"]
if log_validation:
valpd = \
valpd.append(pd.DataFrame([valrow2],
columns=VALICOLS),
ignore_index=True)
else:
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM sequenceSet code found: '
f'{valrow["VALUE"]}')
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'LEADNAME from RHYTHM sequenceSet code: '
f'{valrow["VALUE"]}')
                # Assume it is a lead
aecglead = AecgLead()
aecglead.leadname = valrow["VALUE"]
# Inherit last parsed RHYTHMTIME
aecglead.LEADTIME = copy.deepcopy(aecg.RHYTHMTIME)
                # Retrieve lead origin info
rel_path = "../value/origin"
for n in ["value", "unit"]:
valrow2 = validate_xpath(
xmlnode,
rel_path,
"urn:hl7-org:v3",
n,
new_validation_row(
aecg.filename, "RHYTHM",
"SEQUENCE_LEAD_ORIGIN_" + n),
failcat="WARNING")
valrow2["XPATH"] = xmlnode_path + "/" + rel_path
if valrow2["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM SEQUENCE_LEAD_ORIGIN_{n} '
f'found: {valrow2["VALUE"]}')
if n == "value":
try:
aecglead.origin = float(valrow2["VALUE"])
except Exception as ex:
valrow2["VALIOUT"] == "ERROR"
valrow2["VALIMSG"] = "SEQUENCE_LEAD_"\
"ORIGIN is not a "\
"number"
else:
aecglead.origin_unit = valrow2["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM SEQUENCE_LEAD_ORIGIN_{n} not found')
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
                # Retrieve lead scale info
rel_path = "../value/scale"
for n in ["value", "unit"]:
valrow2 = validate_xpath(
xmlnode,
rel_path,
"urn:hl7-org:v3",
n,
new_validation_row(
aecg.filename, "RHYTHM",
"SEQUENCE_LEAD_SCALE_" + n),
failcat="WARNING")
valrow2["XPATH"] = xmlnode_path + "/" + rel_path
if valrow2["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM SEQUENCE_LEAD_SCALE_{n} '
f'found: {valrow2["VALUE"]}')
if n == "value":
try:
aecglead.scale = float(valrow2["VALUE"])
except Exception as ex:
logger.error(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM SEQUENCE_LEAD_SCALE '
f'value is not a valid number: \"{ex}\"')
valrow2["VALIOUT"] == "ERROR"
valrow2["VALIMSG"] = "SEQUENCE_LEAD_"\
"SCALE is not a "\
"number"
else:
aecglead.scale_unit = valrow2["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM SEQUENCE_LEAD_SCALE_{n} not found')
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Include digits if requested
if include_digits:
rel_path = "../value/digits"
valrow2 = validate_xpath(
xmlnode,
rel_path,
"urn:hl7-org:v3",
"",
new_validation_row(
aecg.filename, "RHYTHM", "SEQUENCE_LEAD_DIGITS"),
failcat="WARNING")
valrow2["XPATH"] = xmlnode_path + "/" + rel_path
if valrow2["VALIOUT"] == "PASSED":
try:
# Convert string of digits to list of integers
# remove new lines
sdigits = valrow2["VALUE"].replace("\n", " ")
                            # remove carriage returns
sdigits = sdigits.replace("\r", " ")
# remove tabs
sdigits = sdigits.replace("\t", " ")
# collapse 2 or more spaces into 1 space char
# and remove leading/trailing white spaces
sdigits = re.sub("\\s+", " ", sdigits).strip()
# Convert string into list of integers
aecglead.digits = [int(s) for s in
sdigits.split(' ')]
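                            # e.g. "1 2  -3\n4" is normalized to
                            # "1 2 -3 4" and parsed to [1, 2, -3, 4]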
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DIGITS added to lead'
f' {aecglead.leadname} (n: '
f'{len(aecglead.digits)})')
except Exception as ex:
logger.error(
f'{aecg.filename},{aecg.zipContainer},'
f'Error parsing DIGITS from '
f'string to list of integers: \"{ex}\"')
valrow2["VALIOUT"] == "ERROR"
valrow2["VALIMSG"] = "Error parsing SEQUENCE_"\
"LEAD_DIGITS from string"\
" to list of integers"
else:
logger.error(
f'{aecg.filename},{aecg.zipContainer},'
f'DIGITS not found for lead {aecglead.leadname}')
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
else:
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DIGITS were not requested by the user')
aecg.RHYTHMLEADS.append(copy.deepcopy(aecglead))
else:
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'RHYTHM sequenceSet code not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS),
ignore_index=True)
if valpd.shape[0] > 0:
aecg.validatorResults = \
aecg.validatorResults.append(valpd, ignore_index=True)
return aecg
def parse_derived_waveform_timeseries(aecg_doc: etree._ElementTree,
aecg: Aecg,
include_digits: bool = False,
log_validation: bool = False):
"""Parses `aecg_doc` XML document and extracts derived's timeseries
This function parses the `aecg_doc` xml document searching for derived
    waveform timeseries (sequences) information that it includes in the returned
:any:`Aecg`. Each found sequence is stored as an :any:`AecgLead` in the
:any:`Aecg.DERIVEDLEADS` list of the returned :any:`Aecg`.
Args:
aecg_doc (etree._ElementTree): aECG XML document
aecg (Aecg): The aECG object to update
include_digits (bool, optional): Indicates whether to include the
digits information in the returned `Aecg`.
log_validation (bool, optional): Indicates whether to maintain the
validation results in `aecg.validatorResults`. Defaults to
False.
Returns:
Aecg: `aecg` updated with the information found in the xml document.
"""
path_prefix = './component/series/derivation/derivedSeries/component'\
'/sequenceSet/component/sequence'
seqnodes = aecg_doc.xpath((path_prefix + '/code').replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
if len(seqnodes) > 0:
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DERIVED sequenceSet(s) found: '
f'{len(seqnodes)} sequenceSet nodes')
else:
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'DERIVED sequenceSet not found')
for xmlnode in seqnodes:
xmlnode_path = aecg_doc.getpath(xmlnode)
valrow = validate_xpath(aecg_doc,
xmlnode_path,
"urn:hl7-org:v3",
"code",
new_validation_row(aecg.filename, "DERIVED",
"SEQUENCE_CODE"),
failcat="WARNING")
valpd = pd.DataFrame()
if valrow["VALIOUT"] == "PASSED":
if not valrow["VALUE"] in SEQUENCE_CODES:
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'DERIVED unexpected sequenceSet code '
f'found: {valrow["VALUE"]}')
valrow["VALIOUT"] = "WARNING"
valrow["VALIMSG"] = "Unexpected sequence code found"
if valrow["VALUE"] in TIME_CODES:
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DERIVED sequenceSet code found: {valrow["VALUE"]}')
aecg.DERIVEDTIME["code"] = valrow["VALUE"]
# Retrieve time head info from value node
rel_path = "../value/head"
valrow2 = validate_xpath(
xmlnode,
rel_path,
"urn:hl7-org:v3",
"value",
new_validation_row(aecg.filename, "DERIVED",
"SEQUENCE_TIME_HEAD"),
failcat="WARNING")
valrow2["XPATH"] = xmlnode_path + "/" + rel_path
if valrow2["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DERIVED SEQUENCE_TIME_HEAD found: '
f'{valrow2["VALUE"]}')
aecg.DERIVEDTIME["head"] = valrow2["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'DERIVED SEQUENCE_TIME_HEAD not found')
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Retrieve time increment info from value node
rel_path = "../value/increment"
for n in ["value", "unit"]:
valrow2 = validate_xpath(
xmlnode,
rel_path,
"urn:hl7-org:v3",
n,
new_validation_row(aecg.filename, "DERIVED",
"SEQUENCE_TIME_" + n),
failcat="WARNING")
valrow2["XPATH"] = xmlnode_path + "/" + rel_path
if valrow2["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DERIVED SEQUENCE_TIME_{n} found: '
f'{valrow2["VALUE"]}')
if n == "value":
aecg.DERIVEDTIME["increment"] =\
float(valrow2["VALUE"])
else:
aecg.DERIVEDTIME[n] = valrow2["VALUE"]
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'DERIVED sequenceSet code found: {valrow["VALUE"]}')
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'LEADNAME from DERIVED sequenceSet code: '
f'{valrow["VALUE"]}')
                # Assume it is a lead
aecglead = AecgLead()
aecglead.leadname = valrow["VALUE"]
# Inherit last parsed DERIVEDTIME
aecglead.LEADTIME = copy.deepcopy(aecg.DERIVEDTIME)
                # Retrieve lead origin info
rel_path = "../value/origin"
for n in ["value", "unit"]:
valrow2 = validate_xpath(
xmlnode,
rel_path,
"urn:hl7-org:v3",
n,
new_validation_row(aecg.filename, "DERIVED",
"SEQUENCE_LEAD_ORIGIN_" + n),
failcat="WARNING")
valrow2["XPATH"] = xmlnode_path + "/" + rel_path
if valrow2["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DERIVED SEQUENCE_LEAD_ORIGIN_{n} '
f'found: {valrow2["VALUE"]}')
if n == "value":
try:
aecglead.origin = float(valrow2["VALUE"])
except Exception as ex:
valrow2["VALIOUT"] == "ERROR"
valrow2["VALIMSG"] = \
"SEQUENCE_LEAD_ORIGIN is not a number"
else:
aecglead.origin_unit = valrow2["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'DERIVED SEQUENCE_LEAD_ORIGIN_{n} not found')
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
                # Retrieve lead scale info
rel_path = "../value/scale"
for n in ["value", "unit"]:
valrow2 = validate_xpath(
xmlnode,
rel_path,
"urn:hl7-org:v3",
n,
new_validation_row(aecg.filename, "DERIVED",
"SEQUENCE_LEAD_SCALE_" + n),
failcat="WARNING")
valrow2["XPATH"] = xmlnode_path + "/" + rel_path
if valrow2["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DERIVED SEQUENCE_LEAD_SCALE_{n} '
f'found: {valrow2["VALUE"]}')
if n == "value":
try:
aecglead.scale = float(valrow2["VALUE"])
except Exception as ex:
logger.error(
f'{aecg.filename},{aecg.zipContainer},'
f'DERIVED SEQUENCE_LEAD_SCALE'
f' value is not a valid number: \"{ex}\"')
valrow2["VALIOUT"] == "ERROR"
valrow2["VALIMSG"] = "SEQUENCE_LEAD_SCALE"\
" is not a number"
else:
aecglead.scale_unit = valrow2["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'DERIVED SEQUENCE_LEAD_SCALE_{n} not found')
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
# Include digits if requested
if include_digits:
rel_path = "../value/digits"
valrow2 = validate_xpath(
xmlnode,
rel_path,
"urn:hl7-org:v3",
"",
new_validation_row(aecg.filename, "DERIVED",
"SEQUENCE_LEAD_DIGITS"),
failcat="WARNING")
valrow2["XPATH"] = xmlnode_path + "/" + rel_path
if valrow2["VALIOUT"] == "PASSED":
try:
# Convert string of digits to list of integers
# remove new lines
sdigits = valrow2["VALUE"].replace("\n", " ")
                            # remove carriage returns
sdigits = sdigits.replace("\r", " ")
# remove tabs
sdigits = sdigits.replace("\t", " ")
# collapse 2 or more spaces into 1 space char
# and remove leading/trailing white spaces
sdigits = re.sub("\\s+", " ", sdigits).strip()
# Convert string into list of integers
aecglead.digits = [int(s) for s in
sdigits.split(' ')]
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DIGITS added to lead'
f' {aecglead.leadname} (n: '
f'{len(aecglead.digits)})')
except Exception as ex:
logger.error(
f'{aecg.filename},{aecg.zipContainer},'
f'Error parsing DIGITS from '
f'string to list of integers: \"{ex}\"')
valrow2["VALIOUT"] == "ERROR"
valrow2["VALIMSG"] = "Error parsing SEQUENCE_"\
"LEAD_DIGITS from string"\
" to list of integers"
else:
logger.error(
f'{aecg.filename},{aecg.zipContainer},'
f'DIGITS not found for lead {aecglead.leadname}')
if log_validation:
valpd = valpd.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
else:
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'DIGITS were not requested by the user')
aecg.DERIVEDLEADS.append(copy.deepcopy(aecglead))
else:
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
                f'DERIVED sequenceSet code not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS),
ignore_index=True)
if valpd.shape[0] > 0:
aecg.validatorResults = \
aecg.validatorResults.append(valpd, ignore_index=True)
return aecg
def parse_waveform_annotations(aecg_doc: etree._ElementTree,
aecg: Aecg,
anngrp: Dict,
log_validation: bool = False):
"""Parses `aecg_doc` XML document and extracts waveform annotations
This function parses the `aecg_doc` xml document searching for
    waveform annotation sets that it includes in the returned
:any:`Aecg`. As indicated in the `anngrp` parameter, each annotation set
is stored as an :any:`AecgAnnotationSet` in the :any:`Aecg.RHYTHMANNS`
or :any:`Aecg.DERIVEDANNS` list of the returned :any:`Aecg`.
Args:
aecg_doc (etree._ElementTree): aECG XML document
aecg (Aecg): The aECG object to update
anngrp (Dict): includes a `valgroup` key indicating whether the
rhythm or derived waveform annotations should be located, and a
`path_prefix` with the xml path prefix for which start searching
for annotation sets in the `aecg_doc` xml document.
log_validation (bool, optional): Indicates whether to maintain the
validation results in `aecg.validatorResults`. Defaults to
False.
Returns:
Aecg: `aecg` updated with the information found in the xml document.
"""
val_grp = anngrp["valgroup"]
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'{val_grp}: searching annotations started')
path_prefix = anngrp["path_prefix"]
anns_setnodes = aecg_doc.xpath(path_prefix.replace('/', '/ns:'),
namespaces={'ns': 'urn:hl7-org:v3'})
if len(anns_setnodes) == 0:
logger.warning(
f'{aecg.filename},{aecg.zipContainer},'
f'{anngrp["valgroup"]}: no annotation nodes found')
for xmlnode in anns_setnodes:
aecgannset = AecgAnnotationSet()
xmlnode_path = aecg_doc.getpath(xmlnode)
# Annotation set: human author information
valrow = validate_xpath(
aecg_doc,
xmlnode_path + "/author/assignedEntity/assignedAuthorType/"
"assignedPerson/name",
"urn:hl7-org:v3",
"",
new_validation_row(aecg.filename, "RHYTHM", "ANNSET_AUTHOR_NAME"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'{val_grp} annotations author: {valrow["VALUE"]}')
aecgannset.person = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'{val_grp} annotations author not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS),
ignore_index=True)
# Annotation set: device author information
valrow = validate_xpath(aecg_doc,
xmlnode_path + "/author/assignedEntity"
"/assignedAuthorType/"
"assignedDevice/"
"manufacturerModelName",
"urn:hl7-org:v3",
"",
new_validation_row(
aecg.filename,
"RHYTHM",
"ANNSET_AUTHOR_DEVICE_MODEL"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
tmp = valrow["VALUE"].replace("\n", "")
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'{val_grp} annotations device model: {tmp}')
aecgannset.device["model"] = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'{val_grp} annotations device model not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS),
ignore_index=True)
valrow = validate_xpath(aecg_doc,
xmlnode_path +
"/author/assignedEntity/"
"assignedAuthorType/assignedDevice/"
"playedManufacturedDevice/"
"manufacturerOrganization/name",
"urn:hl7-org:v3",
"",
new_validation_row(
aecg.filename,
"RHYTHM",
"ANNSET_AUTHOR_DEVICE_NAME"),
failcat="WARNING")
if valrow["VALIOUT"] == "PASSED":
tmp = valrow["VALUE"].replace("\n", "")
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'{val_grp} annotations device name: {tmp}')
aecgannset.device["name"] = valrow["VALUE"]
else:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'{val_grp} annotations device name not found')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS),
ignore_index=True)
aecgannset, valpd = parse_annotations(aecg.filename, aecg.zipContainer,
aecg_doc,
aecgannset,
path_prefix,
xmlnode_path,
anngrp["valgroup"],
log_validation)
if len(aecgannset.anns) == 0:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'{val_grp} no annotations set found')
if log_validation:
aecg.validatorResults = \
aecg.validatorResults.append(valpd, ignore_index=True)
if anngrp["valgroup"] == "RHYTHM":
aecg.RHYTHMANNS.append(copy.deepcopy(aecgannset))
else:
aecg.DERIVEDANNS.append(copy.deepcopy(aecgannset))
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'{val_grp}: searching annotations finished')
return aecg
def parse_rhythm_waveform_annotations(aecg_doc: etree._ElementTree,
aecg: Aecg,
log_validation: bool = False) -> Aecg:
"""Parses `aecg_doc` XML document and extracts rhythm waveform annotations
    This function parses the `aecg_doc` xml document searching for rhythm
    waveform annotation sets that it includes in the returned
:any:`Aecg`. Each annotation set is stored as an :any:`AecgAnnotationSet`
in the :any:`Aecg.RHYTHMANNS` list of the returned :any:`Aecg`.
Args:
aecg_doc (etree._ElementTree): aECG XML document
aecg (Aecg): The aECG object to update
log_validation (bool, optional): Indicates whether to maintain the
validation results in `aecg.validatorResults`. Defaults to
False.
Returns:
Aecg: `aecg` updated with the information found in the xml document.
"""
aecg = parse_waveform_annotations(
aecg_doc, aecg,
{"valgroup": "RHYTHM",
"path_prefix": "./component/series/subjectOf/annotationSet"},
log_validation)
return aecg
def parse_derived_waveform_annotations(aecg_doc: etree._ElementTree,
aecg: Aecg,
log_validation: bool = False) -> Aecg:
"""Parses `aecg_doc` XML document and extracts derived waveform annotations
This function parses the `aecg_doc` xml document searching for derived
    waveform annotation sets that it includes in the returned
:any:`Aecg`. Each annotation set is stored as an :any:`AecgAnnotationSet`
in the :any:`Aecg.DERIVEDANNS` list of the returned :any:`Aecg`.
Args:
aecg_doc (etree._ElementTree): aECG XML document
aecg (Aecg): The aECG object to update
log_validation (bool, optional): Indicates whether to maintain the
validation results in `aecg.validatorResults`. Defaults to
False.
Returns:
Aecg: `aecg` updated with the information found in the xml document.
"""
aecg = parse_waveform_annotations(
aecg_doc, aecg,
{"valgroup": "DERIVED",
"path_prefix": "./component/series/derivation/"
"derivedSeries/subjectOf/annotationSet"},
log_validation)
return aecg
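# Illustrative sketch (not part of the original module): the parse_* helpers
# above operate on an already parsed lxml document and can be chained by hand;
# the filename below is hypothetical.
#   doc = etree.parse("example_aecg.xml")
#   aecg = Aecg()
#   aecg.filename = "example_aecg.xml"
#   aecg = parse_studyinfo(doc, aecg)
#   aecg = parse_rhythm_waveform_info(doc, aecg)
#   aecg = parse_rhythm_waveform_annotations(doc, aecg)
# read_aecg() below wraps this chaining together with file/zip handling,
# optional schema validation and validation logging.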
def read_aecg(xml_filename: str, zip_container: str = "",
include_digits: bool = False,
aecg_schema_filename: str = "",
ns_clean: bool = True, remove_blank_text: bool = True,
in_memory_xml: bool = False,
log_validation: bool = False) -> Aecg:
"""Reads an aECG HL7 XML file and returns an `Aecg` object.
Args:
xml_filename (str): Path to the aECG xml file.
zip_container (str, optional): Zipfile containing the aECG xml. Empty
string if path points to an xml file in the system. Defaults to "".
include_digits (bool, optional): Waveform values are not read nor
parsed if False. Defaults to False.
aecg_schema_filename (str, optional): xsd file to instantiate the
lxml.etree.XMLSchema object for validating the aECG xml document.
Schema validation is not performed if empty string is provided.
Defaults to "".
ns_clean (bool, optional): Indicates whether to clean up namespaces
during XML parsing. Defaults to True.
remove_blank_text (bool, optional): Indicates whether to clean up blank
text during parsing. Defaults to True.
in_memory_xml (bool, optional): If True, keeps a copy of the parsed XML
in :attr:`xmldoc`.
log_validation (bool, optional): If True, populates
:attr:`validatorResults` with parsing information retrieved while
reading and parsing the aECG xml file.
Returns:
Aecg: An aECG object instantiated with the information read from
the `xml_filename` file.
"""
# =======================================
# Initialize Aecg object
# =======================================
aecg = Aecg()
aecg.filename = xml_filename
aecg.zipContainer = zip_container
# =======================================
# Read XML document
# =======================================
aecg_doc = None
parser = etree.XMLParser(ns_clean=ns_clean,
remove_blank_text=remove_blank_text)
if zip_container == "":
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'Reading aecg from {xml_filename} [no zip container]')
valrow = new_validation_row(xml_filename, "READFILE", "FILENAME")
valrow["VALUE"] = xml_filename
try:
aecg_doc = etree.parse(xml_filename, parser)
valrow["VALIOUT"] = "PASSED"
valrow["VALIMSG"] = ""
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'XML file loaded and parsed')
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS),
ignore_index=True)
except Exception as ex:
msg = f'Could not open or parse XML file: \"{ex}\"'
logger.error(
f'{aecg.filename},{aecg.zipContainer},{msg}')
valrow["VALIOUT"] = "ERROR"
valrow["VALIMSG"] = msg
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS),
ignore_index=True)
# Add row with zipcontainer rule as PASSED because there is no zip
# container to test
valrow = new_validation_row(xml_filename, "READFILE", "ZIPCONTAINER")
valrow["VALIOUT"] = "PASSED"
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
else:
logger.info(
f'{aecg.filename},{aecg.zipContainer},'
f'Reading aecg from {xml_filename} '
f'[zip container: {zip_container}]')
valrow = new_validation_row(xml_filename, "READFILE", "ZIPCONTAINER")
valrow["VALUE"] = zip_container
try:
with zipfile.ZipFile(zip_container, "r") as zf:
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'Zip file opened')
valrow2 = new_validation_row(xml_filename, "READFILE",
"FILENAME")
valrow2["VALUE"] = xml_filename
try:
aecg0 = zf.read(xml_filename)
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'XML file read from zip file')
try:
aecg_doc = etree.fromstring(aecg0, parser)
logger.debug(
f'{aecg.filename},{aecg.zipContainer},'
f'XML file loaded and parsed')
except Exception as ex:
msg = f'Could not parse XML file: \"{ex}\"'
logger.error(
f'{aecg.filename},{aecg.zipContainer},{msg}')
valrow2["VALIOUT"] = "ERROR"
valrow2["VALIMSG"] = msg
if log_validation:
aecg.validatorResults = \
aecg.validatorResults.append(
pd.DataFrame([valrow2], columns=VALICOLS),
ignore_index=True)
valrow2["VALIOUT"] = "PASSED"
valrow2["VALIMSG"] = ""
if log_validation:
aecg.validatorResults = aecg.validatorResults.append(
                            pd.DataFrame([valrow2], columns=VALICOLS), ignore_index=True)
import os
import sqlite3
import numpy as np
import scipy.special as ss
import pylab as pl
import pandas as pd
from astropy.io import fits
import om10_lensing_equations as ole
__all__ = ['LensedHostGenerator', 'generate_lensed_host',
'lensed_sersic_2d', 'random_location']
def boundary_max(data):
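    """Return the maximum value found on the outer boundary (first and last
    rows and columns) of a 2D array."""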
ny, nx = data.shape
boundary = np.concatenate((data[:, 0], data[:, -1], data[0, :],
data[-1, :]))
return np.max(boundary)
def write_fits_stamp(data, magnorms, lens_id, galaxy_type, pixel_scale,
outfile, overwrite=True):
boundary_ratio = boundary_max(data)/np.max(data)
if boundary_ratio > 1e-2:
print(f'(boundary max/data max) = {boundary_ratio:.2e} '
f'for {galaxy_type} {lens_id}')
for magnorm in magnorms.values():
if not np.isfinite(magnorm):
raise RuntimeError(f'non-finite magnorm for {lens_id}')
os.makedirs(os.path.dirname(os.path.abspath(outfile)), exist_ok=True)
output = fits.HDUList(fits.PrimaryHDU())
output[0].data = data
output[0].header.set('LENS_ID', lens_id, 'Lens system ID')
output[0].header.set('GALTYPE', galaxy_type, 'Galaxy component type')
for band, magnorm in magnorms.items():
output[0].header.set(f'MAGNORM{band.upper()}', magnorm,
f'magnorm for {band}-band')
output[0].header.set('PIXSCALE', pixel_scale, 'pixel scale in arcseconds')
output.writeto(outfile, overwrite=overwrite)
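# Illustrative call (all values below are made up, not taken from a catalog):
#   stamp = np.zeros((250, 250)); stamp[125, 125] = 1.0
#   magnorms = {band: 25.0 for band in 'ugrizy'}
#   write_fits_stamp(stamp, magnorms, 1234, 'bulge', 0.04, 'out/1234_bulge.fits')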
def lensed_sersic_2d(lens_pix, source_pix, source_cat):
"""
Defines a magnitude of lensed host galaxy using 2d Sersic profile
Parameters
----------
lens_pix: (np.array, np.array)
Arrays of xy pixel coordinates for the lens.
source_pix: (np.array, np.array)
Arrays of xy pixel coordinates for the bulge or disk.
source_cat: dict-like
Dictionary of source parameters.
Returns
-------
(dict, np.array) dictionary of magnorms, lensed image
"""
ysc1 = source_cat['ys1'] # x position of the source, arcseconds
ysc2 = source_cat['ys2'] # y position of the source, arcseconds
Reff = source_cat['Reff_src'] # Effective radius of the source, arcseconds
qs = source_cat['qs'] # axis ratio of the source, b/a
phs = source_cat['phs'] # orientation of the source, degree
ndex = source_cat['ns'] # index of the source
g_limage = ole.sersic_2d(*source_pix, ysc1, ysc2, Reff, qs, phs, ndex)
g_source = ole.sersic_2d(*lens_pix, ysc1, ysc2, Reff, qs, phs, ndex)
g_limage_sum = np.sum(g_limage)
g_source_sum = np.sum(g_source)
if g_limage_sum == 0 or g_source_sum == 0:
        raise RuntimeError('lensed image or source has zero-valued integral '
f'for lens id {source_cat["lensid"]}')
dmag = -2.5*np.log10(g_limage_sum/g_source_sum)
bands = 'ugrizy'
mag_lensed = {band: source_cat[f'mag_src_{band}'] + dmag for band in bands}
return mag_lensed, g_limage
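# The source_cat mapping is expected to provide the keys read above: 'ys1',
# 'ys2', 'Reff_src', 'qs', 'phs', 'ns', 'lensid' and 'mag_src_<band>' for the
# ugrizy bands; lens_pix and source_pix are (x, y) pixel-coordinate arrays of
# matching shape (e.g. as produced by ole.make_r_coor).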
def generate_lensed_host(xi1, xi2, lens_P, srcP_b, srcP_d, dsx, outdir,
object_type):
"""
Does ray tracing of light from host galaxies using a non-singular
isothermal ellipsoid profile, and writes out a FITS image files
of the results of the ray tracing.
Parameters
----------
xi1: np.array
Array of x-positions of lens image in pixel coordinates
xi2: np.array
Array of y-positions of lens image in pixel coordinates
lens_P: dict
Lens parameters (produced by create_cats_{object_type})
srcP_b: dict
Source bulge parameters (produced by create_cats_{object_type})
srcP_d: dict
Source disk parameters (produced by create_cats_{object_type})
dsx: float
        Pixel scale in arcseconds
    outdir: str
        Output directory in which the FITS stamps are written
    object_type: str
        Object type label used to build the output subdirectory and file names
    """
xlc1 = lens_P['xl1'] # x position of the lens, arcseconds
xlc2 = lens_P['xl2'] # y position of the lens, arcseconds
rlc = 0.0 # core size of Non-singular Isothermal
# Ellipsoid
vd = lens_P['vd'] # velocity dispersion of the lens
zl = lens_P['zl'] # redshift of the lens
zs = srcP_b['zs'] # redshift of the source
rle = ole.re_sv(vd, zl, zs) # Einstein radius of lens, arcseconds.
ql = lens_P['ql'] # axis ratio b/a
    le = ole.e2le(1.0 - ql) # scale factor due to projection of ellipsoid
phl = lens_P['phl'] # position angle of the lens, degree
eshr = lens_P['gamma'] # external shear
eang = lens_P['phg'] # position angle of external shear
ekpa = 0.0 # external convergence
ai1, ai2 = ole.alphas_sie(xlc1, xlc2, phl, ql, rle, le, eshr, eang, ekpa,
xi1, xi2)
yi1 = xi1 - ai1
yi2 = xi2 - ai2
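    # Lens equation: the deflection angles (ai1, ai2) of the SIE + external
    # shear model map image-plane coordinates (xi1, xi2) to source-plane
    # coordinates (yi1, yi2), where the unlensed host profile is evaluated.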
lens_id = lens_P['UID_lens']
magnorms, lensed_image_b = lensed_sersic_2d((xi1, xi2), (yi1, yi2), srcP_b)
outfile = os.path.join(outdir, f'{object_type}_lensed_bulges',
f"{lens_id}_bulge.fits")
write_fits_stamp(lensed_image_b, magnorms, lens_id, 'bulge', dsx, outfile)
magnorms, lensed_image_d = lensed_sersic_2d((xi1, xi2), (yi1, yi2), srcP_d)
outfile = os.path.join(outdir, f'{object_type}_lensed_disks',
f"{lens_id}_disk.fits")
write_fits_stamp(lensed_image_d, magnorms, lens_id, 'disk', dsx, outfile)
def random_location(Reff_src, qs, phs, ns, rng=None):
"""Sample a random (x, y) location from the surface brightness
profile of the galaxy. The input parameters are Sersic parameters for the host galaxy.
Parameters:
-----------
Reff_src: float
the effective radius in arcseconds, the radius within which half of the light is contained
qs: float
axis ratio of the source, b/a
phs: float
position angle of the galaxy in degrees
ns: int
Sersic index
rng: numpy.random.RandomState [None]
RandomState object to use for generating random draws from [0, 1).
If None, then create a RandomState with default seeding.
Returns:
-----------
dx: horizontal coordinate of random location (pixel coordinates)
dy: vertical coordinate of random location (pixel coordinates)
"""
if rng is None:
rng = np.random.RandomState()
phs_rad = np.deg2rad(phs-90)
bn = ss.gammaincinv(2. * ns, 0.5)
z = rng.random_sample()
x = ss.gammaincinv(2. * ns, z)
R = (x / bn)**ns * Reff_src
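    # Inverse-CDF sampling of the radius: for a Sersic profile the enclosed
    # light fraction satisfies gammainc(2*ns, bn*(R/Reff_src)**(1/ns)) = z, so
    # inverting the incomplete gamma function at a uniform z yields radii
    # distributed like the surface brightness profile.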
theta = rng.random_sample() * 2 * np.pi
xp, yp = R * np.cos(theta), R * np.sin(theta)
xt = xp * np.sqrt(qs)
yt = yp / np.sqrt(qs)
dx, dy = np.linalg.solve([[np.cos(phs_rad), np.sin(phs_rad)],
[-np.sin(phs_rad), np.cos(phs_rad)]],
[xt, yt])
return dx, dy
def check_random_locations():
"""Defines a random location to compare to"""
npoints = 100000
Reff_disk = 0.2
qs_disk = 0.3
phs_disk = 8.
ns_disk = 1.0
x_d = np.zeros(npoints)
y_d = np.zeros(npoints)
for i in range(npoints):
x_d[i], y_d[i] = random_location(Reff_disk, qs_disk, phs_disk, ns_disk)
bsz = 5.0
nnn = 1000 # number of pixels per side
dsx = bsz/nnn
xi1, xi2 = ole.make_r_coor(nnn, dsx)
src_disk = ole.sersic_2d(xi1,xi2,0.0,0.0,Reff_disk,qs_disk,phs_disk,ns_disk)
src_disk_norm = src_disk/(np.sum(src_disk)*dsx*dsx)
src_disk_px = np.sum(src_disk, axis=1)
src_disk_norm_px = src_disk_px/(np.sum(src_disk_px)*dsx)
src_disk_py = np.sum(src_disk, axis=0)
src_disk_norm_py = src_disk_py/(np.sum(src_disk_py)*dsx)
xsteps = xi1[:,0]
#---------------------------------------------------------------
from matplotlib.ticker import NullFormatter
nullfmt = NullFormatter() # no labels
# definitions for the axes
left, width = 0.1, 0.65
bottom, height = 0.1, 0.65
bottom_h = left_h = left + width + 0.02
rect_scatter = [left, bottom, width, height]
rect_histx = [left, bottom_h, width, 0.2]
rect_histy = [left_h, bottom, 0.2, height]
# start with a rectangular Figure
pl.figure(1, figsize=(8, 8))
axScatter = pl.axes(rect_scatter)
axHistx = pl.axes(rect_histx)
axHisty = pl.axes(rect_histy)
# no labels
axHistx.xaxis.set_major_formatter(nullfmt)
axHisty.yaxis.set_major_formatter(nullfmt)
# the scatter plot:
axScatter.scatter(x_d, y_d)
axScatter.contour(xi1, xi2, src_disk, colors=['k',])
# now determine nice limits by hand:
binwidth = 0.02
xymax = max(np.max(np.abs(x_d)), np.max(np.abs(y_d)))
lim = (int(xymax/binwidth) + 1) * binwidth
axScatter.set_xlim((-lim, lim))
axScatter.set_ylim((-lim, lim))
bins = np.arange(-lim, lim + binwidth, binwidth)
axHistx.hist(x_d, bins=bins, density=1)
axHistx.plot(xsteps, src_disk_norm_px, 'k-')
axHisty.hist(y_d, bins=bins, density=1,orientation='horizontal')
axHisty.plot(src_disk_norm_py, xsteps, 'k-')
axHistx.set_xlim(axScatter.get_xlim())
axHisty.set_ylim(axScatter.get_ylim())
return 0
class LensedHostGenerator:
"""Class to generate lensed hosts."""
def __init__(self, host_truth_file, lens_truth_file, obj_type, outdir,
pixel_size=0.04, num_pix=250, rng=None):
with sqlite3.connect(host_truth_file) as conn:
host_df = pd.read_sql(f'select * from {obj_type}_hosts', conn) \
.query('image_number==0')
with sqlite3.connect(lens_truth_file) as conn:
lens_df = pd.read_sql(f'select * from {obj_type}_lens', conn)
self.df = | pd.merge(host_df, lens_df, on='lens_cat_sys_id', how='inner') | pandas.merge |
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import datetime
from reda.importers.eit_version_2010 import _average_swapped_current_injections
def _extract_adc_data(mat, **kwargs):
"""Extract adc-channel related data (i.e., data that is captured for all 48
channels of the 40-channel medusa system
"""
md = mat['MD'].squeeze()
frequencies = mat['MP']['fm'].take(0)
# it seems that there exist different file formats under this same official
# version.
if md['fm'].size == frequencies.size:
use_v = 0
else:
use_v = 1
# Labview epoch
epoch = datetime.datetime(1904, 1, 1)
def convert_epoch(x):
timestamp = epoch + datetime.timedelta(seconds=x.astype(float))
return timestamp
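    # e.g. convert_epoch(np.float64(86400.0)) -> 1904-01-02 00:00:00
    # (seconds counted from the LabVIEW epoch defined above)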
dfl = []
# loop over frequencies
for f_id in range(0, frequencies.size):
frequency = frequencies[f_id]
if use_v == 0:
def get_field(key):
return md[key][f_id]
elif use_v == 1:
def get_field(key):
indices = np.where(
md['fm'].take(0) == frequencies[f_id])
return md[key].take(0)[indices]
# def get_field(key):
# indices = np.where(md['fm'].take(f_id) == frequencies[f_id])
# return md[key].take(f_id)[indices]
timestamp = np.atleast_2d(
[convert_epoch(x) for x in get_field('Time')]
).T.squeeze()
column_names = ['ch{:02}'.format(i) for i in range(48)]
ab = get_field('cni')
index_pairs = [
(channel, 'Ug3_{}'.format(i)) for channel in column_names
for i in range(3)
]
Ug3 = get_field('Ug3')
ug3_reshaped = Ug3.reshape([Ug3.shape[0], Ug3.shape[1] * 3])
df = pd.DataFrame(
ug3_reshaped,
index=pd.MultiIndex.from_arrays(
[
ab[:, 0],
ab[:, 1],
np.ones(ab.shape[0]) * frequency,
timestamp
],
names=['a', 'b', 'frequency', 'datetime']
),
columns=pd.MultiIndex.from_tuples(
index_pairs, names=['channel', 'parameter'])
)
dfl.append(df)
dfl = pd.concat(dfl)
dfl.sort_index(axis=0, inplace=True)
dfl.sort_index(axis=1, inplace=True)
return dfl
def _extract_md(mat, **kwargs):
"""Note that the md struct for this version is structured differently than
the others...
"""
md = mat['MD'].squeeze()
frequencies = mat['MP']['fm'].take(0)
# Labview epoch
epoch = datetime.datetime(1904, 1, 1)
def convert_epoch(x):
timestamp = epoch + datetime.timedelta(seconds=x.astype(float))
return timestamp
dfl = []
# loop over frequencies
for f_id in range(0, frequencies.size):
def get_field(key):
indices = np.where(
md['fm'].take(0) == frequencies[f_id])
return md[key].take(0)[indices]
timestamp = np.atleast_2d(
[convert_epoch(x) for x in get_field('Time')]
).T.squeeze()
df = pd.DataFrame()
df['datetime'] = timestamp
ab = get_field('cni')
df['a'] = ab[:, 0]
df['b'] = ab[:, 1]
df['U0'] = get_field('U0')
Is3 = get_field('Is3')
df['Is1'] = Is3[:, 0]
df['Is2'] = Is3[:, 1]
df['Is3'] = Is3[:, 2]
df['Is'] = np.mean(Is3, axis=1)
# [mA]
df['Iab'] = df['Is'] * 1000
Il3 = get_field('Il3')
df['Il1'] = Il3[:, 0]
df['Il2'] = Il3[:, 1]
df['Il3'] = Il3[:, 2]
df['Il'] = np.mean(Il3, axis=1)
# [mA]
df['Ileakage'] = df['Il'] * 1000
df['frequency'] = frequencies[f_id]
dfl.append(df)
df = pd.concat(dfl)
return df
def _extract_emd(mat, **kwargs):
emd = mat['EMD'].squeeze()
# Labview epoch
epoch = datetime.datetime(1904, 1, 1)
def convert_epoch(x):
timestamp = epoch + datetime.timedelta(seconds=x.astype(float))
return timestamp
dfl = []
# loop over frequencies
for f_id in range(0, emd.size):
# print('Frequency: ', emd[f_id]['fm'])
fdata = emd[f_id]
# fdata_md = md[f_id]
timestamp = np.atleast_2d(
[convert_epoch(x) for x in fdata['Time'].squeeze()]
).T
df = pd.DataFrame(
np.hstack((
timestamp,
fdata['ni'],
fdata['nu'][:, np.newaxis],
fdata['Z3'],
fdata['Is3'],
fdata['Il3'],
fdata['Zg3'],
)),
)
df.columns = (
'datetime',
'a',
'b',
'p',
'Z1',
'Z2',
'Z3',
'Is1',
'Is2',
'Is3',
'Il1',
'Il2',
'Il3',
'Zg1',
'Zg2',
'Zg3',
)
df['frequency'] = np.ones(df.shape[0]) * fdata['fm']
# cast to correct type
df['datetime'] = | pd.to_datetime(df['datetime']) | pandas.to_datetime |
import requests
import pandas as pd
_MACHINE_SCHEDULE_RDB_URL='http://rdb.pri.diamond.ac.uk/php/opr/cs_oprgetjsonyearcal.php'
class MachineScheduleItem:
def __init__(self, item: dict):
self._item = item
@staticmethod
def _str_to_datetime(dt_str: str):
return pd.to_datetime(dt_str, utc=True)
@property
def start(self):
return self._str_to_datetime(self._item['fromdatetime'])
@property
def end(self):
return self._str_to_datetime(self._item['todatetime'])
@property
def duration(self):
return self.end - self.start
@property
def description(self):
        return F"{self.run} - {self._item['typedescription']}"
@property
def run(self):
return F"{self.start.year} {self._item['description']}"
def __str__(self):
s = F"<MachineScheduleItem: {self.description}>"
return s
def __repr__(self):
return str(self)
class MachineSchedule:
def __init__(self):
self._url = _MACHINE_SCHEDULE_RDB_URL
self._cal = {}
def _get_machine_calendar(self, year):
# Machine Calendar REST API: https://confluence.diamond.ac.uk/x/zAB_Aw
resp = requests.get(self._url,
params={'CALYEAR': year},
headers={'content-type': 'application/json'})
if resp.status_code != 200:
raise RuntimeError(F'Unable to get machine calendar from {self._url}')
cal = resp.json()
if len(cal) == 1:
cal = cal[0]
self._cal.update({year: cal})
return cal
def get_run(self, year, run):
"""Return tuple of (start_time, end_time, [run])"""
if year not in self._cal:
self._get_machine_calendar(year)
run_items = []
for run_item in self._cal[year]['run'][str(run)]:
run_items.append(MachineScheduleItem(run_item))
return run_items[0].start, run_items[-1].end, run_items
def get_run_total_beamtime(self, year, run):
run = self.get_run(year, run)
beamtimes = pd.Series([r.duration for r in run])
import os
import sys
import math
from neuralprophet.df_utils import join_dataframes
import numpy as np
import pandas as pd
import torch
from collections import OrderedDict
from neuralprophet import hdays as hdays_part2
import holidays as pyholidays
import warnings
import logging
log = logging.getLogger("NP.utils")
def reg_func_abs(weights):
"""Regularization of weights to induce sparcity
Args:
weights (torch tensor): Model weights to be regularized towards zero
Returns:
regularization loss, scalar
"""
return torch.mean(torch.abs(weights)).squeeze()
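# Worked example (sketch): reg_func_abs(torch.tensor([-1.0, 0.0, 2.0]))
# returns mean(|w|) = (1 + 0 + 2) / 3 = 1.0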
def reg_func_trend(weights, threshold=None):
"""Regularization of weights to induce sparcity
Args:
weights (torch tensor): Model weights to be regularized towards zero
threshold (float): value below which not to regularize weights
Returns:
regularization loss, scalar
"""
abs_weights = torch.abs(weights)
if threshold is not None and not math.isclose(threshold, 0):
abs_weights = torch.clamp(abs_weights - threshold, min=0.0)
reg = torch.sum(abs_weights).squeeze()
return reg
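# Worked example (sketch): for weights [0.05, -0.2, 0.5] and threshold=0.1 the
# absolute values [0.05, 0.2, 0.5] are soft-thresholded to [0.0, 0.1, 0.4],
# so the returned regularization term is their sum, 0.5.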
def reg_func_season(weights):
return reg_func_abs(weights)
def reg_func_events(events_config, country_holidays_config, model):
"""
Regularization of events coefficients to induce sparsity
Args:
events_config (OrderedDict): Configurations (upper, lower windows, regularization) for user specified events
country_holidays_config (OrderedDict): Configurations (holiday_names, upper, lower windows, regularization)
for country specific holidays
model (TimeNet): The TimeNet model object
Returns:
regularization loss, scalar
"""
reg_events_loss = 0.0
if events_config is not None:
for event, configs in events_config.items():
reg_lambda = configs.reg_lambda
if reg_lambda is not None:
weights = model.get_event_weights(event)
for offset in weights.keys():
reg_events_loss += reg_lambda * reg_func_abs(weights[offset])
if country_holidays_config is not None:
reg_lambda = country_holidays_config.reg_lambda
if reg_lambda is not None:
for holiday in country_holidays_config.holiday_names:
weights = model.get_event_weights(holiday)
for offset in weights.keys():
reg_events_loss += reg_lambda * reg_func_abs(weights[offset])
return reg_events_loss
def reg_func_regressors(regressors_config, model):
"""
Regularization of regressors coefficients to induce sparsity
Args:
regressors_config (OrderedDict): Configurations for user specified regressors
model (TimeNet): The TimeNet model object
Returns:
regularization loss, scalar
"""
reg_regressor_loss = 0.0
for regressor, configs in regressors_config.items():
reg_lambda = configs.reg_lambda
if reg_lambda is not None:
weight = model.get_reg_weights(regressor)
reg_regressor_loss += reg_lambda * reg_func_abs(weight)
return reg_regressor_loss
def symmetric_total_percentage_error(values, estimates):
"""Compute STPE
Args:
values (np.array):
estimates (np.array):
Returns:
scalar (float)
"""
sum_abs_diff = np.sum(np.abs(estimates - values))
sum_abs = np.sum(np.abs(estimates) + np.abs(values))
return 100 * sum_abs_diff / (10e-9 + sum_abs)
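# Worked example (sketch): values = np.array([1.0, 2.0, 3.0]) and
# estimates = np.array([1.0, 2.0, 4.0]) give sum_abs_diff = 1 and sum_abs = 13,
# so the STPE is approximately 100 * 1 / 13 = 7.69.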
def season_config_to_model_dims(season_config):
"""Convert the NeuralProphet seasonal model configuration to input dims for TimeNet model.
Args:
season_config (AllSeasonConfig): NeuralProphet seasonal model configuration
Returns:
seasonal_dims (dict(int)): input dims for TimeNet model
"""
if season_config is None or len(season_config.periods) < 1:
return None
seasonal_dims = OrderedDict({})
for name, period in season_config.periods.items():
resolution = period.resolution
if season_config.computation == "fourier":
resolution = 2 * resolution
seasonal_dims[name] = resolution
return seasonal_dims
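# Example (hedged, assuming a season config holding a 'yearly' period with
# resolution 6 and computation == "fourier"): the result is
# OrderedDict([('yearly', 12)]), i.e. the resolution doubled to cover the
# sine and cosine term of each Fourier order.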
def get_holidays_from_country(country, df=None):
"""
Return all possible holiday names of given country
Args:
country (string): country name to retrieve country specific holidays
df (Dataframe or list of dataframes): Dataframe or list of dataframes from which datestamps will be
retrieved from
Returns:
A set of all possible holiday names of given country
"""
if df is None:
dates = None
else:
if isinstance(df, list):
df, _ = join_dataframes(df)
dates = df["ds"].copy(deep=True)
if dates is None:
years = np.arange(1995, 2045)
else:
years = list({x.year for x in dates})
# manually defined holidays
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
holiday_names = getattr(hdays_part2, country)(years=years).values()
except AttributeError:
try:
holiday_names = getattr(pyholidays, country)(years=years).values()
except AttributeError:
raise AttributeError("Holidays in {} are not currently supported!".format(country))
return set(holiday_names)
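# Usage sketch (actual names depend on the installed holidays packages):
# get_holidays_from_country("US") returns a set of holiday names such as
# "Christmas Day" for the years 1995-2044 when no dataframe is supplied.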
def events_config_to_model_dims(events_config, country_holidays_config):
"""
Convert the NeuralProphet user specified events configurations along with country specific
holidays to input dims for TimeNet model.
Args:
events_config (OrderedDict): Configurations (upper, lower windows, regularization) for user specified events
country_holidays_config (configure.Holidays): Configurations (holiday_names, upper, lower windows, regularization)
for country specific holidays
Returns:
events_dims (OrderedDict): A dictionary keyed by the individual events and holidays,
each value holding the mode, the list of event delimiter names (one per offset),
and the indices in the input dataframe corresponding to that event.
"""
if events_config is None and country_holidays_config is None:
return None
additive_events_dims = pd.DataFrame(columns=["event", "event_delim"])
multiplicative_events_dims = pd.DataFrame(columns=["event", "event_delim"])
if events_config is not None:
for event, configs in events_config.items():
mode = configs.mode
for offset in range(configs.lower_window, configs.upper_window + 1):
event_delim = create_event_names_for_offsets(event, offset)
if mode == "additive":
additive_events_dims = additive_events_dims.append(
{"event": event, "event_delim": event_delim}, ignore_index=True
)
else:
multiplicative_events_dims = multiplicative_events_dims.append(
{"event": event, "event_delim": event_delim}, ignore_index=True
)
if country_holidays_config is not None:
lower_window = country_holidays_config.lower_window
upper_window = country_holidays_config.upper_window
mode = country_holidays_config.mode
for country_holiday in country_holidays_config.holiday_names:
for offset in range(lower_window, upper_window + 1):
holiday_delim = create_event_names_for_offsets(country_holiday, offset)
if mode == "additive":
additive_events_dims = additive_events_dims.append(
{"event": country_holiday, "event_delim": holiday_delim}, ignore_index=True
)
else:
multiplicative_events_dims = multiplicative_events_dims.append(
{"event": country_holiday, "event_delim": holiday_delim}, ignore_index=True
)
# sort based on event_delim
event_dims = pd.DataFrame()
if not additive_events_dims.empty:
additive_events_dims = additive_events_dims.sort_values(by="event_delim").reset_index(drop=True)
additive_events_dims["mode"] = "additive"
event_dims = additive_events_dims
if not multiplicative_events_dims.empty:
multiplicative_events_dims = multiplicative_events_dims.sort_values(by="event_delim").reset_index(drop=True)
multiplicative_events_dims["mode"] = "multiplicative"
event_dims = event_dims.append(multiplicative_events_dims)
event_dims_dic = OrderedDict({})
# convert to dict format
for event, row in event_dims.groupby("event"):
event_dims_dic[event] = {
"mode": row["mode"].iloc[0],
"event_delim": list(row["event_delim"]),
"event_indices": list(row.index),
}
return event_dims_dic
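# Shape of the returned structure (sketch for a single additive event
# "superbowl" with lower_window=0 and upper_window=1):
#   {'superbowl': {'mode': 'additive',
#                  'event_delim': ['superbowl_+0', 'superbowl_+1'],
#                  'event_indices': [0, 1]}}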
def create_event_names_for_offsets(event_name, offset):
"""
Create names for offsets of every event
Args:
event_name (string): Name of the event
offset (int): Offset of the event
Returns:
offset_name (string): A name created for the offset of the event
"""
offset_name = "{}_{}{}".format(event_name, "+" if offset >= 0 else "-", abs(offset))
return offset_name
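# Examples: create_event_names_for_offsets("superbowl", 1) -> "superbowl_+1"
#           create_event_names_for_offsets("superbowl", -2) -> "superbowl_-2"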
def regressors_config_to_model_dims(regressors_config):
"""
Convert the NeuralProphet user specified regressors configurations to input dims for TimeNet model.
Args:
regressors_config (OrderedDict): Configurations for user specified regressors
Returns:
regressors_dims (OrderedDict): A dictionary keyed by the individual regressors,
each value holding the mode and the indices in the input dataframe corresponding to that regressor.
"""
if regressors_config is None:
return None
else:
additive_regressors = []
multiplicative_regressors = []
if regressors_config is not None:
for regressor, configs in regressors_config.items():
mode = configs.mode
if mode == "additive":
additive_regressors.append(regressor)
else:
multiplicative_regressors.append(regressor)
# sort based on event_delim
regressors_dims = pd.DataFrame()
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import json
import warnings
import sys
import absl.logging
absl.logging.set_verbosity(absl.logging.ERROR)
import argparse
import logging
import os
"""Silence every warning of notice from tensorflow."""
logging.getLogger('tensorflow').setLevel(logging.ERROR)
os.environ["KMP_AFFINITY"] = "noverbose"
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
tf.get_logger().setLevel('ERROR')
tf.autograph.set_verbosity(3)
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.losses import MeanAbsoluteError, MeanSquaredError
import tensorflow.keras.backend as K
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error
from utils import RobustMAE, RobustLoss
from utils import Normalizer
from model import Finder
from spektral.data import DisjointLoader
from data_loader import DataLoader
from pymatgen import Composition, Element, MPRester
from matminer.featurizers.base import MultipleFeaturizer
from matminer.featurizers import composition as cf
from matminer.featurizers.composition import ElementFraction
class Worker(object):
"""
**Arguments**
- `train_path`: path to the training database
- `val_path`: path to the validation database
- `test_path`: path to the test database
- `model_path`: path where the best model is saved
- `epochs`: number of epochs
- `learning_rate`: initial learning rate, which is lowered at every epoch by a factor of 0.999
- `batch_size`: minibatch size
- `patience`: number of epochs to wait with no improvement in loss before stopped by early stopping criterion
- `channels`: internal vector dimension of the message passing layer;
- `aggregate_type`: permutation invariant function to aggregate messages
- `mae_loss`: use mean absolute error as the loss function instead of default L1 robust loss
- `train`: flag to train Finder model
- `test`: flag to test Finder model
- `pred_func`: predict a function (multi-target regression)
- `is_pickle`: use this flag if data is stored as a pickle file
- `threshold_radius`: atoms located at a distance less than the threshold radius are bonded in the crystal graph
- `use_crystal_structure`: use crystal structure details (crystal graph)
- `embedding_path`: path where the element embedding JSON files are saved
- `embedding_type`: element embedding type. Available embedding types: mat2vec, onehot, cgcnn, megnet16, mlelmo
"""
def __init__(self,
train_path,
val_path,
test_path,
model_path='saved_models/best_model_gnn',
learning_rate= 3e-4,
epochs=1200,
batch_size=128,
patience=300,
channels=200,
aggregate_type='mean',
mae_loss=False,
train=False,
test=False,
pred_func=False,
is_pickle=False,
threshold_radius=4,
use_crystal_structure=False,
embedding_path='data/embeddings/',
embedding_type='mat2vec',
max_no_atoms=500):
self.train_path = train_path
self.val_path = val_path
self.test_path = test_path
self.model_path = model_path
self.learning_rate = learning_rate
self.epochs = epochs
self.batch_size = batch_size
self.patience = patience
self.channels = channels
self.aggregate_type = aggregate_type
self.mae_loss = mae_loss
self.train = train
self.test = test
self.pred_func = pred_func
self.is_pickle = is_pickle
self.threshold_radius = threshold_radius
self.use_crystal_structure = use_crystal_structure
self.embedding_path = embedding_path
self.embedding_type = embedding_type
self.max_no_atoms = max_no_atoms
self.scaler = Normalizer()
def train_model(self):
dataset_tr = DataLoader(data_path=self.train_path,
is_train=self.train,
pred_func=self.pred_func,
is_pickle=self.is_pickle,
scaler=self.scaler,
embedding_path=self.embedding_path,
embedding_type=self.embedding_type,
threshold_radius=self.threshold_radius,
use_crystal_structure=self.use_crystal_structure,
max_no_atoms=self.max_no_atoms)
## get scaler attribute after fitting on training data
self.scaler = dataset_tr.scaler
scaler_dict = self.scaler.state_dict()
os.makedirs('saved_models/best_model_gnn', exist_ok=True)
json.dump(scaler_dict, open("saved_models/best_model_gnn/scaler_dict.json", 'w' )) # save the state of scaler
dataset_val = DataLoader(data_path=self.val_path,
is_train=False,
pred_func=self.pred_func,
is_pickle=self.is_pickle,
scaler=self.scaler,
embedding_path=self.embedding_path,
embedding_type=self.embedding_type,
use_crystal_structure=self.use_crystal_structure,
max_no_atoms=self.max_no_atoms)
loader_tr = DisjointLoader(dataset_tr, batch_size=self.batch_size, epochs=self.epochs)
loader_val = DisjointLoader(dataset_val, batch_size=self.batch_size, epochs=1)
n_out = dataset_tr.n_labels # Dimension of the target
if self.mae_loss:
robust = False
else:
robust = True
model = Finder(channels=self.channels,
n_out=n_out,
robust=robust,
aggregate_type=self.aggregate_type,
use_crystal_structure=self.use_crystal_structure)
optimizer = Adam(self.learning_rate)
if self.mae_loss:
loss_fn = MeanAbsoluteError()
else:
loss_fn = RobustLoss()
robust_mae = RobustMAE(scaler=self.scaler, pred_func=self.pred_func)
try:
if self.train:
################################################################################
# Fit model
################################################################################
@tf.function(input_signature=loader_tr.tf_signature(), experimental_relax_shapes=True)
def train_step(inputs, target):
with tf.GradientTape() as tape:
predictions = model(inputs, training=True)
target=tf.cast(target, tf.float32)
loss = loss_fn(target, predictions) + sum(model.losses)
if self.mae_loss:
mae = loss_fn(target, predictions)
else:
mae = robust_mae.mean_absolute_error(target, predictions)
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
return loss, mae
train_mae = []
validation_mae = []
step = loss = 0
tr_mae = 0
validation_data = list(loader_val)
epoch_no = 1
best_val_mae = 1e6
for batch in loader_tr:
step += 1
l, tmae = train_step(*batch)
loss+=l
tr_mae+=tmae
if step == loader_tr.steps_per_epoch:
val_loss = 0
val_mae = 0
for batch_val in validation_data:
val_inputs, val_targets = batch_val
val_predictions = model(val_inputs, training=False)
val_loss += loss_fn(val_targets, val_predictions)
if self.mae_loss:
val_mae+=loss_fn(val_targets, val_predictions)
else:
val_mae += robust_mae.mean_absolute_error(val_targets, val_predictions)
step = 0
K.set_value(optimizer.learning_rate, optimizer.lr*0.999) # reduce learning rate
print('\nEpoch: ', epoch_no)
print("Training Loss: {:.5f} \t Validation Loss: {:.5f}\n".format(loss / loader_tr.steps_per_epoch, val_loss / loader_val.steps_per_epoch))
print("Training MAE: {:.5f} \t Validation MAE: {:.5f}\n".format(tr_mae / loader_tr.steps_per_epoch, val_mae / loader_val.steps_per_epoch))
train_mae.append(tr_mae/loader_tr.steps_per_epoch)
validation_mae.append(val_mae/loader_val.steps_per_epoch)
if val_mae/loader_val.steps_per_epoch < best_val_mae:
# save current best model and scaler metadata
model.save('saved_models/best_model_gnn',save_format='tf')
if len(validation_mae) > self.patience:
if validation_mae[-(self.patience+1)] < min(validation_mae[-self.patience:]):
print(f'\nEarly stopping. No validation loss '
f'improvement in {self.patience} epochs.')
break
with open('results/history.csv', 'a+') as file:
file.write(str(epoch_no)+','+str((tr_mae/loader_tr.steps_per_epoch).numpy())+','+str((val_mae/loader_val.steps_per_epoch).numpy())+'\n')
epoch_no+=1
loss = 0
tr_mae = 0
tm = [t.numpy() for t in train_mae]
vm = [v.numpy() for v in validation_mae]
df = pd.DataFrame({'Train MAE': tm, 'Validation MAE': vm})
df.to_csv('results/training_history.csv')
## plotting
plt.plot(range(1, len(train_mae)+1), train_mae, lw=2, ls='-', c='blue', label='Train')
plt.plot(range(1, len(validation_mae)+1), validation_mae, lw=2, ls='-', c='red', label='Validation')
plt.xlabel('Epoch Number', fontsize=14)
plt.ylabel('Mean Absolute Error', fontsize=14)
plt.legend()
plt.tight_layout()
plt.savefig('results/training_log.png', dpi=100)
# plt.show()
except KeyboardInterrupt:
pass
if self.test:
self.test_model()
def test_model(self):
################################################################################
# Evaluate model
################################################################################
print("\n\nLoading current best model ...\n")
try:
model_path = self.model_path
model = tf.keras.models.load_model(model_path)
except:
print('No model exists. Please run with --train to train the model first')
if self.mae_loss:
loss_fn = MeanAbsoluteError()
else:
loss_fn = RobustLoss()
scaler_dict = json.load(open("{0}/scaler_dict.json".format(self.model_path)))
self.scaler.load_state_dict(state_dict=scaler_dict) # update scaler
robust_mae = RobustMAE(scaler=self.scaler, pred_func=self.pred_func)
dataset_te = DataLoader(data_path=self.test_path,
is_train=False,
pred_func=self.pred_func,
is_pickle=self.is_pickle,
scaler=self.scaler,
embedding_path=self.embedding_path,
embedding_type=self.embedding_type,
use_crystal_structure=self.use_crystal_structure,
max_no_atoms=self.max_no_atoms)
loader_te = DisjointLoader(dataset_te, batch_size=self.batch_size, epochs=1, shuffle=False)
print('Testing the model ...\n')
loss = 0
test_mae = 0
target_list = []
predictions_list = []
uncertainity_list = []
for batch in loader_te:
inputs, target = batch
predictions = model(inputs, training=False)
loss += loss_fn(target, predictions)
if self.mae_loss:
predictions_list.extend(list(predictions.numpy()))
else:
predictions_list.extend(list(tf.split(predictions, 2, axis=-1)[0].numpy()))
uncertainity_list.extend(list(tf.split(predictions, 2, axis=-1)[1].numpy()))
test_mae += robust_mae.mean_absolute_error(target, predictions)
loss /= loader_te.steps_per_epoch
print("Test Loss: {:.5f}".format(loss / loader_te.steps_per_epoch))
# print("Test MAE denormed: {:.5f}".format(test_mae / loader_te.steps_per_epoch))
preds = np.array(predictions_list).flatten()
sigmas = np.array(uncertainity_list).flatten()
predictions = self.scaler.denorm(preds)
uncertainities = tf.math.exp(sigmas) * self.scaler.std
df_in = self.get_valid_compounds(pd.read_csv(self.test_path))
import numpy as np
import pandas as pd
import json
from mplsoccer.pitch import Pitch, VerticalPitch
path = "C:/Users/brand/desktop/events/events_England.json"
with open(path) as f:
data = json.load(f)
train = pd.DataFrame(data)
path2 = "C:/Users/brand/desktop/players.json"
with open(path2) as f:
play = json.load(f)
players = pd.DataFrame(play)
lst = ['events_France.json','events_Germany.json','events_Italy.json','events_Spain.json']
pathway = "C:/Users/brand/desktop/events/"
for country in lst:
with open(pathway + country) as f:
datal = json.load(f)
tl = pd.DataFrame(datal)
train = pd.concat([train,tl],ignore_index=True)
#pd.unique(train['subEventName'])
shots = train[train['subEventName'] == 'Shot']
print(len(shots))
shots_model = pd.DataFrame(columns=["Goal","X","Y"], dtype=object)
import unittest
import platform
import random
import string
import pandas as pd
import numpy as np
import numba
import hpat
from hpat.tests.test_utils import (count_array_REPs, count_parfor_REPs, count_parfor_OneDs,
count_array_OneDs, dist_IR_contains, get_start_end)
from hpat.tests.gen_test_data import ParquetGenerator
from numba.config import IS_32BITS
@hpat.jit
def inner_get_column(df):
# df2 = df[['A', 'C']]
# df2['D'] = np.ones(3)
return df.A
COL_IND = 0
class TestDataFrame(unittest.TestCase):
def test_create1(self):
def test_impl(n):
df = pd.DataFrame({'A': np.ones(n), 'B': np.random.ranf(n)})
return df.A
hpat_func = hpat.jit(test_impl)
n = 11
pd.testing.assert_series_equal(hpat_func(n), test_impl(n))
def test_create_cond1(self):
def test_impl(A, B, c):
if c:
df = pd.DataFrame({'A': A})
else:
df = pd.DataFrame({'A': B})
return df.A
hpat_func = hpat.jit(test_impl)
n = 11
A = np.ones(n)
B = np.arange(n) + 1.0
c = 0
pd.testing.assert_series_equal(hpat_func(A, B, c), test_impl(A, B, c))
c = 2
pd.testing.assert_series_equal(hpat_func(A, B, c), test_impl(A, B, c))
@unittest.skip('Implement feature to create DataFrame without column names')
def test_create_without_column_names(self):
def test_impl():
df = pd.DataFrame([100, 200, 300, 400, 200, 100])
return df
hpat_func = hpat.jit(test_impl)
pd.testing.assert_frame_equal(hpat_func(), test_impl())
def test_unbox1(self):
def test_impl(df):
return df.A
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n), 'B': np.random.ranf(n)})
pd.testing.assert_series_equal(hpat_func(df), test_impl(df))
@unittest.skip("needs properly refcounted dataframes")
def test_unbox2(self):
def test_impl(df, cond):
n = len(df)
if cond:
df['A'] = np.arange(n) + 2.0
return df.A
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.ones(n), 'B': np.random.ranf(n)})
pd.testing.assert_series_equal(hpat_func(df.copy(), True), test_impl(df.copy(), True))
pd.testing.assert_series_equal(hpat_func(df.copy(), False), test_impl(df.copy(), False))
@unittest.skip('Implement feature to create DataFrame without column names')
def test_unbox_without_column_names(self):
def test_impl(df):
return df
df = pd.DataFrame([100, 200, 300, 400, 200, 100])
hpat_func = hpat.jit(test_impl)
pd.testing.assert_frame_equal(hpat_func(df), test_impl(df))
def test_box1(self):
def test_impl(n):
df = pd.DataFrame({'A': np.ones(n), 'B': np.arange(n)})
return df
hpat_func = hpat.jit(test_impl)
n = 11
do_check = False if platform.system() == 'Windows' and not IS_32BITS else True
pd.testing.assert_frame_equal(hpat_func(n), test_impl(n), check_dtype=do_check)
def test_box2(self):
def test_impl():
df = pd.DataFrame({'A': [1, 2, 3], 'B': ['a', 'bb', 'ccc']})
return df
hpat_func = hpat.jit(test_impl)
pd.testing.assert_frame_equal(hpat_func(), test_impl())
@unittest.skip("pending df filter support")
def test_box3(self):
def test_impl(df):
df = df[df.A != 'dd']
return df
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['aa', 'bb', 'cc']})
pd.testing.assert_frame_equal(hpat_func(df), test_impl(df))
def test_box_categorical(self):
def test_impl(df):
df['A'] = df['A'] + 1
return df
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': [1, 2, 3],
'B': pd.Series(['N', 'Y', 'Y'],
dtype=pd.api.types.CategoricalDtype(['N', 'Y']))})
pd.testing.assert_frame_equal(hpat_func(df.copy(deep=True)), test_impl(df))
def test_box_dist_return(self):
def test_impl(n):
df = pd.DataFrame({'A': np.ones(n), 'B': np.arange(n)})
return df
hpat_func = hpat.jit(distributed={'df'})(test_impl)
n = 11
hres, res = hpat_func(n), test_impl(n)
self.assertEqual(count_array_OneDs(), 3)
self.assertEqual(count_parfor_OneDs(), 2)
dist_sum = hpat.jit(
lambda a: hpat.distributed_api.dist_reduce(
a, np.int32(hpat.distributed_api.Reduce_Type.Sum.value)))
dist_sum(1) # run to compile
np.testing.assert_allclose(dist_sum(hres.A.sum()), res.A.sum())
np.testing.assert_allclose(dist_sum(hres.B.sum()), res.B.sum())
def test_len1(self):
def test_impl(n):
df = pd.DataFrame({'A': np.ones(n, np.int64), 'B': np.random.ranf(n)})
return len(df)
hpat_func = hpat.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
def test_shape1(self):
def test_impl(n):
df = pd.DataFrame({'A': np.ones(n, np.int64), 'B': np.random.ranf(n)})
return df.shape
hpat_func = hpat.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
def test_column_getitem1(self):
def test_impl(n):
df = pd.DataFrame({'A': np.ones(n), 'B': np.random.ranf(n)})
Ac = df['A'].values
return Ac.sum()
hpat_func = hpat.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
self.assertEqual(count_parfor_OneDs(), 1)
def test_column_list_getitem1(self):
def test_impl(df):
return df[['A', 'C']]
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame(
{'A': np.arange(n), 'B': np.ones(n), 'C': np.random.ranf(n)})
pd.testing.assert_frame_equal(hpat_func(df), test_impl(df))
def test_filter1(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n) + n, 'B': np.arange(n)**2})
df1 = df[df.A > .5]
return df1.B.sum()
hpat_func = hpat.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
def test_filter2(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n) + n, 'B': np.arange(n)**2})
df1 = df.loc[df.A > .5]
return np.sum(df1.B)
hpat_func = hpat.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
def test_filter3(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n) + n, 'B': np.arange(n)**2})
df1 = df.iloc[(df.A > .5).values]
return np.sum(df1.B)
hpat_func = hpat.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
def test_iloc1(self):
def test_impl(df, n):
return df.iloc[1:n].B.values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n), 'B': np.arange(n)**2})
np.testing.assert_array_equal(hpat_func(df, n), test_impl(df, n))
def test_iloc2(self):
def test_impl(df, n):
return df.iloc[np.array([1, 4, 9])].B.values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n), 'B': np.arange(n)**2})
np.testing.assert_array_equal(hpat_func(df, n), test_impl(df, n))
def test_iloc3(self):
def test_impl(df):
return df.iloc[:, 1].values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n), 'B': np.arange(n)**2})
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
@unittest.skip("TODO: support A[[1,2,3]] in Numba")
def test_iloc4(self):
def test_impl(df, n):
return df.iloc[[1, 4, 9]].B.values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n), 'B': np.arange(n)**2})
np.testing.assert_array_equal(hpat_func(df, n), test_impl(df, n))
def test_iloc5(self):
# test iloc with global value
def test_impl(df):
return df.iloc[:, COL_IND].values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n), 'B': np.arange(n)**2})
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_loc1(self):
def test_impl(df):
return df.loc[:, 'B'].values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n), 'B': np.arange(n)**2})
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_iat1(self):
def test_impl(n):
df = pd.DataFrame({'B': np.ones(n), 'A': np.arange(n) + n})
return df.iat[3, 1]
hpat_func = hpat.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
def test_iat2(self):
def test_impl(df):
return df.iat[3, 1]
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'B': np.ones(n), 'A': np.arange(n) + n})
self.assertEqual(hpat_func(df), test_impl(df))
def test_iat3(self):
def test_impl(df, n):
return df.iat[n - 1, 1]
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'B': np.ones(n), 'A': np.arange(n) + n})
self.assertEqual(hpat_func(df, n), test_impl(df, n))
def test_iat_set1(self):
def test_impl(df, n):
df.iat[n - 1, 1] = n**2
return df.A # return the column to check column aliasing
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'B': np.ones(n), 'A': np.arange(n) + n})
df2 = df.copy()
pd.testing.assert_series_equal(hpat_func(df, n), test_impl(df2, n))
def test_iat_set2(self):
def test_impl(df, n):
df.iat[n - 1, 1] = n**2
return df # check df aliasing/boxing
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'B': np.ones(n), 'A': np.arange(n) + n})
df2 = df.copy()
pd.testing.assert_frame_equal(hpat_func(df, n), test_impl(df2, n))
def test_set_column1(self):
# set existing column
def test_impl(n):
df = pd.DataFrame({'A': np.ones(n, np.int64), 'B': np.arange(n) + 3.0})
df['A'] = np.arange(n)
return df
hpat_func = hpat.jit(test_impl)
n = 11
do_check = False if platform.system() == 'Windows' and not IS_32BITS else True
pd.testing.assert_frame_equal(hpat_func(n), test_impl(n), check_dtype=do_check)
def test_set_column_reflect4(self):
# set existing column
def test_impl(df, n):
df['A'] = np.arange(n)
hpat_func = hpat.jit(test_impl)
n = 11
df1 = pd.DataFrame({'A': np.ones(n, np.int64), 'B': np.arange(n) + 3.0})
df2 = df1.copy()
hpat_func(df1, n)
test_impl(df2, n)
do_check = False if platform.system() == 'Windows' and not IS_32BITS else True
pd.testing.assert_frame_equal(df1, df2, check_dtype=do_check)
def test_set_column_new_type1(self):
# set existing column with a new type
def test_impl(n):
df = pd.DataFrame({'A': np.ones(n), 'B': np.arange(n) + 3.0})
df['A'] = np.arange(n)
return df
hpat_func = hpat.jit(test_impl)
n = 11
do_check = False if platform.system() == 'Windows' and not IS_32BITS else True
pd.testing.assert_frame_equal(hpat_func(n), test_impl(n), check_dtype=do_check)
def test_set_column2(self):
# create new column
def test_impl(n):
df = pd.DataFrame({'A': np.ones(n), 'B': np.arange(n) + 1.0})
df['C'] = np.arange(n)
return df
hpat_func = hpat.jit(test_impl)
n = 11
do_check = False if platform.system() == 'Windows' and not IS_32BITS else True
pd.testing.assert_frame_equal(hpat_func(n), test_impl(n), check_dtype=do_check)
def test_set_column_reflect3(self):
# create new column
def test_impl(df, n):
df['C'] = np.arange(n)
hpat_func = hpat.jit(test_impl)
n = 11
df1 = pd.DataFrame({'A': np.ones(n, np.int64), 'B': np.arange(n) + 3.0})
df2 = df1.copy()
hpat_func(df1, n)
test_impl(df2, n)
do_check = False if platform.system() == 'Windows' and not IS_32BITS else True
pd.testing.assert_frame_equal(df1, df2, check_dtype=do_check)
def test_set_column_bool1(self):
def test_impl(df):
df['C'] = df['A'][df['B']]
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': [1, 2, 3], 'B': [True, False, True]})
df2 = df.copy()
test_impl(df2)
hpat_func(df)
pd.testing.assert_series_equal(df.C, df2.C)
def test_set_column_reflect1(self):
def test_impl(df, arr):
df['C'] = arr
return df.C.sum()
hpat_func = hpat.jit(test_impl)
n = 11
arr = np.random.ranf(n)
df = pd.DataFrame({'A': np.ones(n), 'B': np.random.ranf(n)})
hpat_func(df, arr)
self.assertIn('C', df)
np.testing.assert_almost_equal(df.C.values, arr)
def test_set_column_reflect2(self):
def test_impl(df, arr):
df['C'] = arr
return df.C.sum()
hpat_func = hpat.jit(test_impl)
n = 11
arr = np.random.ranf(n)
df = pd.DataFrame({'A': np.ones(n), 'B': np.random.ranf(n)})
df2 = df.copy()
np.testing.assert_almost_equal(hpat_func(df, arr), test_impl(df2, arr))
def test_df_values1(self):
def test_impl(n):
df = pd.DataFrame({'A': np.ones(n), 'B': np.arange(n)})
return df.values
hpat_func = hpat.jit(test_impl)
n = 11
np.testing.assert_array_equal(hpat_func(n), test_impl(n))
def test_df_values2(self):
def test_impl(df):
return df.values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.ones(n), 'B': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_df_values_parallel1(self):
def test_impl(n):
df = pd.DataFrame({'A': np.ones(n), 'B': np.arange(n)})
return df.values.sum()
hpat_func = hpat.jit(test_impl)
n = 11
np.testing.assert_array_equal(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
def test_df_apply(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n), 'B': np.arange(n)})
B = df.apply(lambda r: r.A + r.B, axis=1)
return df.B.sum()
n = 121
hpat_func = hpat.jit(test_impl)
np.testing.assert_almost_equal(hpat_func(n), test_impl(n))
def test_df_apply_branch(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n), 'B': np.arange(n)})
B = df.apply(lambda r: r.A < 10 and r.B > 20, axis=1)
return df.B.sum()
n = 121
hpat_func = hpat.jit(test_impl)
np.testing.assert_almost_equal(hpat_func(n), test_impl(n))
def test_df_describe(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(0, n, 1, np.float32),
'B': np.arange(n)})
#df.A[0:1] = np.nan
return df.describe()
hpat_func = hpat.jit(test_impl)
n = 1001
hpat_func(n)
# XXX: test actual output
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
def test_sort_values(self):
def test_impl(df):
df.sort_values('A', inplace=True)
return df.B.values
n = 1211
np.random.seed(2)
df = pd.DataFrame({'A': np.random.ranf(n), 'B': np.arange(n), 'C': np.random.ranf(n)})
hpat_func = hpat.jit(test_impl)
np.testing.assert_almost_equal(hpat_func(df.copy()), test_impl(df))
def test_sort_values_copy(self):
def test_impl(df):
df2 = df.sort_values('A')
return df2.B.values
n = 1211
np.random.seed(2)
df = pd.DataFrame({'A': np.random.ranf(n), 'B': np.arange(n), 'C': np.random.ranf(n)})
hpat_func = hpat.jit(test_impl)
np.testing.assert_almost_equal(hpat_func(df.copy()), test_impl(df))
def test_sort_values_single_col(self):
def test_impl(df):
df.sort_values('A', inplace=True)
return df.A.values
n = 1211
np.random.seed(2)
df = pd.DataFrame({'A': np.random.ranf(n)})
hpat_func = hpat.jit(test_impl)
np.testing.assert_almost_equal(hpat_func(df.copy()), test_impl(df))
def test_sort_values_single_col_str(self):
def test_impl(df):
df.sort_values('A', inplace=True)
return df.A.values
n = 1211
random.seed(2)
str_vals = []
for _ in range(n):
k = random.randint(1, 30)
val = ''.join(random.choices(string.ascii_uppercase + string.digits, k=k))
str_vals.append(val)
df = pd.DataFrame({'A': str_vals})
hpat_func = hpat.jit(test_impl)
self.assertTrue((hpat_func(df.copy()) == test_impl(df)).all())
def test_sort_values_str(self):
def test_impl(df):
df.sort_values('A', inplace=True)
return df.B.values
n = 1211
random.seed(2)
str_vals = []
str_vals2 = []
for i in range(n):
k = random.randint(1, 30)
val = ''.join(random.choices(string.ascii_uppercase + string.digits, k=k))
str_vals.append(val)
val = ''.join(random.choices(string.ascii_uppercase + string.digits, k=k))
str_vals2.append(val)
df = pd.DataFrame({'A': str_vals, 'B': str_vals2})
# use mergesort for stability, in str generation equal keys are more probable
sorted_df = df.sort_values('A', inplace=False, kind='mergesort')
hpat_func = hpat.jit(test_impl)
self.assertTrue((hpat_func(df) == sorted_df.B.values).all())
def test_sort_parallel_single_col(self):
# create `kde.parquet` file
ParquetGenerator.gen_kde_pq()
# TODO: better parallel sort test
def test_impl():
df = pd.read_parquet('kde.parquet')
df.sort_values('points', inplace=True)
res = df.points.values
return res
hpat_func = hpat.jit(locals={'res:return': 'distributed'})(test_impl)
save_min_samples = hpat.hiframes.sort.MIN_SAMPLES
try:
hpat.hiframes.sort.MIN_SAMPLES = 10
res = hpat_func()
self.assertTrue((np.diff(res) >= 0).all())
finally:
# restore global val
hpat.hiframes.sort.MIN_SAMPLES = save_min_samples
def test_df_isna1(self):
'''Verify DataFrame.isna implementation for various types of data'''
def test_impl(df):
return df.isna()
hpat_func = hpat.jit(test_impl)
# TODO: add column with datetime values when test_series_datetime_isna1 is fixed
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0],
'B': [np.inf, 5, np.nan, 6],
'C': ['aa', 'b', None, 'ccc'],
'D': [None, 'dd', '', None]})
pd.testing.assert_frame_equal(hpat_func(df), test_impl(df))
def test_df_astype_str1(self):
'''Verifies DataFrame.astype implementation converting various types to string'''
def test_impl(df):
return df.astype(str)
hpat_func = hpat.jit(test_impl)
# TODO: add column with float values when test_series_astype_float_to_str1 is fixed
df = pd.DataFrame({'A': [-1, 2, 11, 5, 0, -7],
'B': ['aa', 'bb', 'cc', 'dd', '', 'fff']
})
pd.testing.assert_frame_equal(hpat_func(df), test_impl(df))
def test_df_astype_float1(self):
'''Verifies DataFrame.astype implementation converting various types to float'''
def test_impl(df):
return df.astype(np.float64)
hpat_func = hpat.jit(test_impl)
# TODO: uncomment column with string values when test_series_astype_str_to_float64 is fixed
df = pd.DataFrame({'A': [-1, 2, 11, 5, 0, -7],
# 'B': ['3.24', '1E+05', '-1', '-1.3E-01', 'nan', 'inf'],
'C': [3.24, 1E+05, -1, -1.3E-01, np.nan, np.inf]
})
pd.testing.assert_frame_equal(hpat_func(df), test_impl(df))
def test_df_astype_int1(self):
'''Verifies DataFrame.astype implementation converting various types to int'''
def test_impl(df):
return df.astype(np.int32)
hpat_func = hpat.jit(test_impl)
n = 6
# TODO: uncomment column with string values when test_series_astype_str_to_int32 is fixed
df = pd.DataFrame({'A': np.ones(n, dtype=np.int64),
'B': np.arange(n, dtype=np.int32),
# 'C': ['-1', '2', '3', '0', '-7', '99'],
'D': np.arange(float(n), dtype=np.float32)
})
pd.testing.assert_frame_equal(hpat_func(df), test_impl(df))
def test_sort_parallel(self):
# create `kde.parquet` file
ParquetGenerator.gen_kde_pq()
# TODO: better parallel sort test
def test_impl():
df = pd.read_parquet('kde.parquet')
df['A'] = df.points.astype(np.float64)
df.sort_values('points', inplace=True)
res = df.A.values
return res
hpat_func = hpat.jit(locals={'res:return': 'distributed'})(test_impl)
save_min_samples = hpat.hiframes.sort.MIN_SAMPLES
try:
hpat.hiframes.sort.MIN_SAMPLES = 10
res = hpat_func()
self.assertTrue((np.diff(res) >= 0).all())
finally:
# restore global val
hpat.hiframes.sort.MIN_SAMPLES = save_min_samples
def test_itertuples(self):
def test_impl(df):
res = 0.0
for r in df.itertuples():
res += r[1]
return res
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n), 'B': np.ones(n, np.int64)})
self.assertEqual(hpat_func(df), test_impl(df))
def test_itertuples_str(self):
def test_impl(df):
res = ""
for r in df.itertuples():
res += r[1]
return res
hpat_func = hpat.jit(test_impl)
n = 3
df = pd.DataFrame({'A': ['aa', 'bb', 'cc'], 'B': np.ones(n, np.int64)})
self.assertEqual(hpat_func(df), test_impl(df))
def test_itertuples_order(self):
def test_impl(n):
res = 0.0
df = pd.DataFrame({'B': np.arange(n), 'A': np.ones(n, np.int64)})
for r in df.itertuples():
res += r[1]
return res
hpat_func = hpat.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
def test_itertuples_analysis(self):
"""tests array analysis handling of generated tuples, shapes going
through blocks and getting used in an array dimension
"""
def test_impl(n):
res = 0
df = pd.DataFrame({'B': np.arange(n), 'A': np.ones(n, np.int64)})
for r in df.itertuples():
if r[1] == 2:
A = np.ones(r[1])
res += len(A)
return res
hpat_func = hpat.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
@unittest.skipIf(platform.system() == 'Windows', "Attribute 'dtype' differs: int64 vs int32")
def test_df_head1(self):
def test_impl(n):
df = pd.DataFrame({'A': np.ones(n), 'B': np.arange(n)})
return df.head(3)
hpat_func = hpat.jit(test_impl)
n = 11
pd.testing.assert_frame_equal(hpat_func(n), test_impl(n))
def test_pct_change1(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n) + 1.0, 'B': np.arange(n) + 1})
return df.pct_change(3)
hpat_func = hpat.jit(test_impl)
n = 11
pd.testing.assert_frame_equal(hpat_func(n), test_impl(n))
def test_mean1(self):
# TODO: non-numeric columns should be ignored automatically
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n) + 1.0, 'B': np.arange(n) + 1})
return df.mean()
hpat_func = hpat.jit(test_impl)
n = 11
pd.testing.assert_series_equal(hpat_func(n), test_impl(n))
def test_median1(self):
# TODO: non-numeric columns should be ignored automatically
def test_impl(n):
df = pd.DataFrame({'A': 2 ** np.arange(n), 'B': np.arange(n) + 1.0})
return df.median()
hpat_func = hpat.jit(test_impl)
n = 11
pd.testing.assert_series_equal(hpat_func(n), test_impl(n))
def test_std1(self):
# TODO: non-numeric columns should be ignored automatically
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n) + 1.0, 'B': np.arange(n) + 1})
return df.std()
hpat_func = hpat.jit(test_impl)
n = 11
pd.testing.assert_series_equal(hpat_func(n), test_impl(n))
def test_var1(self):
# TODO: non-numeric columns should be ignored automatically
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n) + 1.0, 'B': np.arange(n) + 1})
return df.var()
hpat_func = hpat.jit(test_impl)
n = 11
pd.testing.assert_series_equal(hpat_func(n), test_impl(n))
def test_max1(self):
# TODO: non-numeric columns should be ignored automatically
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n) + 1.0, 'B': np.arange(n) + 1})
return df.max()
hpat_func = hpat.jit(test_impl)
n = 11
pd.testing.assert_series_equal(hpat_func(n), test_impl(n))
def test_min1(self):
# TODO: non-numeric columns should be ignored automatically
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n) + 1.0, 'B': np.arange(n) + 1})
return df.min()
hpat_func = hpat.jit(test_impl)
n = 11
pd.testing.assert_series_equal(hpat_func(n), test_impl(n))
def test_sum1(self):
# TODO: non-numeric columns should be ignored automatically
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n) + 1.0, 'B': np.arange(n) + 1})
return df.sum()
hpat_func = hpat.jit(test_impl)
n = 11
pd.testing.assert_series_equal(hpat_func(n), test_impl(n))
def test_prod1(self):
# TODO: non-numeric columns should be ignored automatically
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n) + 1.0, 'B': np.arange(n) + 1})
return df.prod()
hpat_func = hpat.jit(test_impl)
n = 11
pd.testing.assert_series_equal(hpat_func(n), test_impl(n))
def test_count1(self):
# TODO: non-numeric columns should be ignored automatically
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n) + 1.0, 'B': np.arange(n) + 1})
return df.count()
hpat_func = hpat.jit(test_impl)
n = 11
pd.testing.assert_series_equal(hpat_func(n), test_impl(n))
def test_df_fillna1(self):
def test_impl(df):
return df.fillna(5.0)
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0]})
hpat_func = hpat.jit(test_impl)
pd.testing.assert_frame_equal(hpat_func(df), test_impl(df))
def test_df_fillna_str1(self):
def test_impl(df):
return df.fillna("dd")
df = pd.DataFrame({'A': ['aa', 'b', None, 'ccc']})
from collections import OrderedDict
from datetime import timedelta
import numpy as np
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype, DatetimeTZDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Series,
Timedelta,
Timestamp,
_np_version_under1p14,
concat,
date_range,
option_context,
)
from pandas.core.arrays import integer_array
import pandas.util.testing as tm
def _check_cast(df, v):
"""
Check if all dtypes of df are equal to v
"""
assert all(s.dtype.name == v for _, s in df.items())
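# e.g. _check_cast(pd.DataFrame({"a": [1.0, 2.0]}).astype("float32"), "float32")
# passes, while a frame with mixed dtypes would fail the assertion.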
class TestDataFrameDataTypes:
def test_concat_empty_dataframe_dtypes(self):
df = DataFrame(columns=list("abc"))
df["a"] = df["a"].astype(np.bool_)
df["b"] = df["b"].astype(np.int32)
df["c"] = df["c"].astype(np.float64)
result = pd.concat([df, df])
assert result["a"].dtype == np.bool_
assert result["b"].dtype == np.int32
assert result["c"].dtype == np.float64
result = pd.concat([df, df.astype(np.float64)])
assert result["a"].dtype == np.object_
assert result["b"].dtype == np.float64
assert result["c"].dtype == np.float64
def test_empty_frame_dtypes_ftypes(self):
empty_df = pd.DataFrame()
tm.assert_series_equal(empty_df.dtypes, pd.Series(dtype=np.object))
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(empty_df.ftypes, pd.Series(dtype=np.object))
nocols_df = pd.DataFrame(index=[1, 2, 3])
tm.assert_series_equal(nocols_df.dtypes, pd.Series(dtype=np.object))
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(nocols_df.ftypes, pd.Series(dtype=np.object))
norows_df = pd.DataFrame(columns=list("abc"))
tm.assert_series_equal(
norows_df.dtypes, pd.Series(np.object, index=list("abc"))
)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(
norows_df.ftypes, pd.Series("object:dense", index=list("abc"))
)
norows_int_df = pd.DataFrame(columns=list("abc")).astype(np.int32)
tm.assert_series_equal(
norows_int_df.dtypes, pd.Series(np.dtype("int32"), index=list("abc"))
)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(
norows_int_df.ftypes, pd.Series("int32:dense", index=list("abc"))
)
odict = OrderedDict
df = pd.DataFrame(odict([("a", 1), ("b", True), ("c", 1.0)]), index=[1, 2, 3])
ex_dtypes = pd.Series(
odict([("a", np.int64), ("b", np.bool), ("c", np.float64)])
)
ex_ftypes = pd.Series(
odict([("a", "int64:dense"), ("b", "bool:dense"), ("c", "float64:dense")])
)
tm.assert_series_equal(df.dtypes, ex_dtypes)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(df.ftypes, ex_ftypes)
# same but for empty slice of df
tm.assert_series_equal(df[:0].dtypes, ex_dtypes)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(df[:0].ftypes, ex_ftypes)
def test_datetime_with_tz_dtypes(self):
tzframe = DataFrame(
{
"A": date_range("20130101", periods=3),
"B": date_range("20130101", periods=3, tz="US/Eastern"),
"C": date_range("20130101", periods=3, tz="CET"),
}
)
tzframe.iloc[1, 1] = pd.NaT
tzframe.iloc[1, 2] = pd.NaT
result = tzframe.dtypes.sort_index()
expected = Series(
[
np.dtype("datetime64[ns]"),
DatetimeTZDtype("ns", "US/Eastern"),
DatetimeTZDtype("ns", "CET"),
],
["A", "B", "C"],
)
tm.assert_series_equal(result, expected)
def test_dtypes_are_correct_after_column_slice(self):
# GH6525
df = pd.DataFrame(index=range(5), columns=list("abc"), dtype=np.float_)
odict = OrderedDict
tm.assert_series_equal(
df.dtypes,
pd.Series(odict([("a", np.float_), ("b", np.float_), ("c", np.float_)])),
)
tm.assert_series_equal(
df.iloc[:, 2:].dtypes, pd.Series(odict([("c", np.float_)]))
)
tm.assert_series_equal(
df.dtypes,
pd.Series(odict([("a", np.float_), ("b", np.float_), ("c", np.float_)])),
)
def test_select_dtypes_include_using_list_like(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(include=[np.number])
ei = df[["b", "c", "d", "k"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=[np.number], exclude=["timedelta"])
ei = df[["b", "c", "d"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=[np.number, "category"], exclude=["timedelta"])
ei = df[["b", "c", "d", "f"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=["datetime"])
ei = df[["g"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=["datetime64"])
ei = df[["g"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=["datetimetz"])
ei = df[["h", "i"]]
tm.assert_frame_equal(ri, ei)
with pytest.raises(NotImplementedError, match=r"^$"):
df.select_dtypes(include=["period"])
def test_select_dtypes_exclude_using_list_like(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
}
)
re = df.select_dtypes(exclude=[np.number])
ee = df[["a", "e"]]
tm.assert_frame_equal(re, ee)
def test_select_dtypes_exclude_include_using_list_like(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
exclude = (np.datetime64,)
include = np.bool_, "integer"
r = df.select_dtypes(include=include, exclude=exclude)
e = df[["b", "c", "e"]]
tm.assert_frame_equal(r, e)
exclude = ("datetime",)
include = "bool", "int64", "int32"
r = df.select_dtypes(include=include, exclude=exclude)
e = df[["b", "e"]]
tm.assert_frame_equal(r, e)
def test_select_dtypes_include_using_scalars(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(include=np.number)
ei = df[["b", "c", "d", "k"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include="datetime")
ei = df[["g"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include="datetime64")
ei = df[["g"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include="category")
ei = df[["f"]]
tm.assert_frame_equal(ri, ei)
with pytest.raises(NotImplementedError, match=r"^$"):
df.select_dtypes(include="period")
def test_select_dtypes_exclude_using_scalars(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(exclude=np.number)
ei = df[["a", "e", "f", "g", "h", "i", "j"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(exclude="category")
ei = df[["a", "b", "c", "d", "e", "g", "h", "i", "j", "k"]]
tm.assert_frame_equal(ri, ei)
with pytest.raises(NotImplementedError, match=r"^$"):
df.select_dtypes(exclude="period")
def test_select_dtypes_include_exclude_using_scalars(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(include=np.number, exclude="floating")
ei = df[["b", "c", "k"]]
tm.assert_frame_equal(ri, ei)
def test_select_dtypes_include_exclude_mixed_scalars_lists(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(include=np.number, exclude=["floating", "timedelta"])
ei = df[["b", "c"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=[np.number, "category"], exclude="floating")
ei = df[["b", "c", "f", "k"]]
tm.assert_frame_equal(ri, ei)
def test_select_dtypes_duplicate_columns(self):
# GH20839
odict = OrderedDict
df = DataFrame(
odict(
[
("a", list("abc")),
("b", list(range(1, 4))),
("c", np.arange(3, 6).astype("u1")),
("d", np.arange(4.0, 7.0, dtype="float64")),
("e", [True, False, True]),
("f", pd.date_range("now", periods=3).values),
]
)
)
df.columns = ["a", "a", "b", "b", "b", "c"]
expected = DataFrame(
{"a": list(range(1, 4)), "b": np.arange(3, 6).astype("u1")}
)
result = df.select_dtypes(include=[np.number], exclude=["floating"])
tm.assert_frame_equal(result, expected)
def test_select_dtypes_not_an_attr_but_still_valid_dtype(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
df["g"] = df.f.diff()
assert not hasattr(np, "u8")
r = df.select_dtypes(include=["i8", "O"], exclude=["timedelta"])
e = df[["a", "b"]]
tm.assert_frame_equal(r, e)
r = df.select_dtypes(include=["i8", "O", "timedelta64[ns]"])
e = df[["a", "b", "g"]]
tm.assert_frame_equal(r, e)
def test_select_dtypes_empty(self):
df = DataFrame({"a": list("abc"), "b": list(range(1, 4))})
msg = "at least one of include or exclude must be nonempty"
with pytest.raises(ValueError, match=msg):
df.select_dtypes()
def test_select_dtypes_bad_datetime64(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
with pytest.raises(ValueError, match=".+ is too specific"):
df.select_dtypes(include=["datetime64[D]"])
with pytest.raises(ValueError, match=".+ is too specific"):
df.select_dtypes(exclude=["datetime64[as]"])
def test_select_dtypes_datetime_with_tz(self):
df2 = DataFrame(
dict(
A=Timestamp("20130102", tz="US/Eastern"),
B=Timestamp("20130603", tz="CET"),
),
index=range(5),
)
df3 = pd.concat([df2.A.to_frame(), df2.B.to_frame()], axis=1)
result = df3.select_dtypes(include=["datetime64[ns]"])
expected = df3.reindex(columns=[])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"dtype", [str, "str", np.string_, "S1", "unicode", np.unicode_, "U1"]
)
@pytest.mark.parametrize("arg", ["include", "exclude"])
def test_select_dtypes_str_raises(self, dtype, arg):
df = DataFrame(
{
"a": list("abc"),
"g": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
msg = "string dtypes are not allowed"
kwargs = {arg: [dtype]}
with pytest.raises(TypeError, match=msg):
df.select_dtypes(**kwargs)
def test_select_dtypes_bad_arg_raises(self):
df = DataFrame(
{
"a": list("abc"),
"g": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
msg = "data type.*not understood"
with pytest.raises(TypeError, match=msg):
df.select_dtypes(["blargy, blarg, blarg"])
def test_select_dtypes_typecodes(self):
# GH 11990
df = tm.makeCustomDataframe(30, 3, data_gen_f=lambda x, y: np.random.random())
expected = df
FLOAT_TYPES = list(np.typecodes["AllFloat"])
tm.assert_frame_equal(df.select_dtypes(FLOAT_TYPES), expected)
def test_dtypes_gh8722(self, float_string_frame):
float_string_frame["bool"] = float_string_frame["A"] > 0
result = float_string_frame.dtypes
expected = Series(
{k: v.dtype for k, v in float_string_frame.items()}, index=result.index
)
tm.assert_series_equal(result, expected)
# compat, GH 8722
with option_context("use_inf_as_na", True):
df = DataFrame([[1]])
result = df.dtypes
tm.assert_series_equal(result, Series({0: np.dtype("int64")}))
def test_ftypes(self, mixed_float_frame):
frame = mixed_float_frame
expected = Series(
dict(
A="float32:dense",
B="float32:dense",
C="float16:dense",
D="float64:dense",
)
).sort_values()
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
result = frame.ftypes.sort_values()
tm.assert_series_equal(result, expected)
def test_astype_float(self, float_frame):
casted = float_frame.astype(int)
expected = DataFrame(
float_frame.values.astype(int),
index=float_frame.index,
columns=float_frame.columns,
)
tm.assert_frame_equal(casted, expected)
casted = float_frame.astype(np.int32)
expected = DataFrame(
float_frame.values.astype(np.int32),
index=float_frame.index,
columns=float_frame.columns,
)
tm.assert_frame_equal(casted, expected)
float_frame["foo"] = "5"
casted = float_frame.astype(int)
expected = DataFrame(
float_frame.values.astype(int),
index=float_frame.index,
columns=float_frame.columns,
)
tm.assert_frame_equal(casted, expected)
def test_astype_mixed_float(self, mixed_float_frame):
# mixed casting
casted = mixed_float_frame.reindex(columns=["A", "B"]).astype("float32")
_check_cast(casted, "float32")
casted = mixed_float_frame.reindex(columns=["A", "B"]).astype("float16")
_check_cast(casted, "float16")
def test_astype_mixed_type(self, mixed_type_frame):
# mixed casting
mn = mixed_type_frame._get_numeric_data().copy()
mn["little_float"] = np.array(12345.0, dtype="float16")
mn["big_float"] = np.array(123456789101112.0, dtype="float64")
casted = mn.astype("float64")
_check_cast(casted, "float64")
casted = mn.astype("int64")
_check_cast(casted, "int64")
casted = mn.reindex(columns=["little_float"]).astype("float16")
_check_cast(casted, "float16")
casted = mn.astype("float32")
_check_cast(casted, "float32")
casted = mn.astype("int32")
_check_cast(casted, "int32")
# to object
casted = mn.astype("O")
_check_cast(casted, "object")
def test_astype_with_exclude_string(self, float_frame):
df = float_frame.copy()
expected = float_frame.astype(int)
df["string"] = "foo"
casted = df.astype(int, errors="ignore")
expected["string"] = "foo"
tm.assert_frame_equal(casted, expected)
df = float_frame.copy()
expected = float_frame.astype(np.int32)
df["string"] = "foo"
casted = df.astype(np.int32, errors="ignore")
expected["string"] = "foo"
tm.assert_frame_equal(casted, expected)
def test_astype_with_view_float(self, float_frame):
# this is the only real reason to do it this way
tf = np.round(float_frame).astype(np.int32)
casted = tf.astype(np.float32, copy=False)
# TODO(wesm): verification?
tf = float_frame.astype(np.float64)
casted = tf.astype(np.int64, copy=False) # noqa
def test_astype_with_view_mixed_float(self, mixed_float_frame):
tf = mixed_float_frame.reindex(columns=["A", "B", "C"])
casted = tf.astype(np.int64)
casted = tf.astype(np.float32) # noqa
@pytest.mark.parametrize("dtype", [np.int32, np.int64])
@pytest.mark.parametrize("val", [np.nan, np.inf])
def test_astype_cast_nan_inf_int(self, val, dtype):
# see gh-14265
#
# Check NaN and inf --> raise error when converting to int.
msg = "Cannot convert non-finite values \\(NA or inf\\) to integer"
df = DataFrame([val])
with pytest.raises(ValueError, match=msg):
df.astype(dtype)
def test_astype_str(self):
# see gh-9757
a = Series(date_range("2010-01-04", periods=5))
b = Series(date_range("3/6/2012 00:00", periods=5, tz="US/Eastern"))
c = Series([Timedelta(x, unit="d") for x in range(5)])
d = Series(range(5))
e = Series([0.0, 0.2, 0.4, 0.6, 0.8])
df = DataFrame({"a": a, "b": b, "c": c, "d": d, "e": e})
# Datetime-like
result = df.astype(str)
expected = DataFrame(
{
"a": list(map(str, map(lambda x: Timestamp(x)._date_repr, a._values))),
"b": list(map(str, map(Timestamp, b._values))),
"c": list(
map(
str,
map(lambda x: Timedelta(x)._repr_base(format="all"), c._values),
)
),
"d": list(map(str, d._values)),
"e": list(map(str, e._values)),
}
)
tm.assert_frame_equal(result, expected)
def test_astype_str_float(self):
# see gh-11302
result = DataFrame([np.NaN]).astype(str)
expected = DataFrame(["nan"])
tm.assert_frame_equal(result, expected)
result = DataFrame([1.12345678901234567890]).astype(str)
# < 1.14 truncates
# >= 1.14 preserves the full repr
val = "1.12345678901" if _np_version_under1p14 else "1.1234567890123457"
expected = DataFrame([val])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("dtype_class", [dict, Series])
def test_astype_dict_like(self, dtype_class):
# GH7271 & GH16717
a = Series(date_range("2010-01-04", periods=5))
b = Series(range(5))
c = Series([0.0, 0.2, 0.4, 0.6, 0.8])
d = Series(["1.0", "2", "3.14", "4", "5.4"])
df = DataFrame({"a": a, "b": b, "c": c, "d": d})
original = df.copy(deep=True)
# change type of a subset of columns
dt1 = dtype_class({"b": "str", "d": "float32"})
result = df.astype(dt1)
expected = DataFrame(
{
"a": a,
"b": Series(["0", "1", "2", "3", "4"]),
"c": c,
"d": Series([1.0, 2.0, 3.14, 4.0, 5.4], dtype="float32"),
}
)
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(df, original)
dt2 = dtype_class({"b": np.float32, "c": "float32", "d": np.float64})
result = df.astype(dt2)
expected = DataFrame(
{
"a": a,
"b": Series([0.0, 1.0, 2.0, 3.0, 4.0], dtype="float32"),
"c": Series([0.0, 0.2, 0.4, 0.6, 0.8], dtype="float32"),
"d": Series([1.0, 2.0, 3.14, 4.0, 5.4], dtype="float64"),
}
)
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(df, original)
# change all columns
dt3 = dtype_class({"a": str, "b": str, "c": str, "d": str})
tm.assert_frame_equal(df.astype(dt3), df.astype(str))
tm.assert_frame_equal(df, original)
# error should be raised when using something other than column labels
# in the keys of the dtype dict
dt4 = dtype_class({"b": str, 2: str})
dt5 = dtype_class({"e": str})
msg = "Only a column name can be used for the key in a dtype mappings argument"
with pytest.raises(KeyError, match=msg):
df.astype(dt4)
with pytest.raises(KeyError, match=msg):
df.astype(dt5)
tm.assert_frame_equal(df, original)
# if the dtypes provided are the same as the original dtypes, the
# resulting DataFrame should be the same as the original DataFrame
dt6 = dtype_class({col: df[col].dtype for col in df.columns})
equiv = df.astype(dt6)
tm.assert_frame_equal(df, equiv)
tm.assert_frame_equal(df, original)
# GH 16717
# if dtypes provided is empty, the resulting DataFrame
# should be the same as the original DataFrame
dt7 = dtype_class({})
result = df.astype(dt7)
        tm.assert_frame_equal(df, result)
tm.assert_frame_equal(df, original)
def test_astype_duplicate_col(self):
a1 = Series([1, 2, 3, 4, 5], name="a")
b = Series([0.1, 0.2, 0.4, 0.6, 0.8], name="b")
a2 = Series([0, 1, 2, 3, 4], name="a")
df = concat([a1, b, a2], axis=1)
result = df.astype(str)
a1_str = Series(["1", "2", "3", "4", "5"], dtype="str", name="a")
b_str = Series(["0.1", "0.2", "0.4", "0.6", "0.8"], dtype=str, name="b")
a2_str = Series(["0", "1", "2", "3", "4"], dtype="str", name="a")
expected = concat([a1_str, b_str, a2_str], axis=1)
tm.assert_frame_equal(result, expected)
result = df.astype({"a": "str"})
expected = concat([a1_str, b, a2_str], axis=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"dtype",
[
"category",
CategoricalDtype(),
CategoricalDtype(ordered=True),
CategoricalDtype(ordered=False),
CategoricalDtype(categories=list("abcdef")),
CategoricalDtype(categories=list("edba"), ordered=False),
CategoricalDtype(categories=list("edcb"), ordered=True),
],
ids=repr,
)
def test_astype_categorical(self, dtype):
# GH 18099
d = {"A": list("abbc"), "B": list("bccd"), "C": list("cdde")}
df = DataFrame(d)
result = df.astype(dtype)
expected = DataFrame({k: Categorical(d[k], dtype=dtype) for k in d})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"cls",
[
pd.api.types.CategoricalDtype,
pd.api.types.DatetimeTZDtype,
pd.api.types.IntervalDtype,
],
)
def test_astype_categoricaldtype_class_raises(self, cls):
df = DataFrame({"A": ["a", "a", "b", "c"]})
xpr = "Expected an instance of {}".format(cls.__name__)
with pytest.raises(TypeError, match=xpr):
df.astype({"A": cls})
with pytest.raises(TypeError, match=xpr):
df["A"].astype(cls)
@pytest.mark.parametrize("dtype", ["Int64", "Int32", "Int16"])
def test_astype_extension_dtypes(self, dtype):
# GH 22578
df = pd.DataFrame([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], columns=["a", "b"])
expected1 = pd.DataFrame(
{
"a": integer_array([1, 3, 5], dtype=dtype),
"b": integer_array([2, 4, 6], dtype=dtype),
}
)
tm.assert_frame_equal(df.astype(dtype), expected1)
tm.assert_frame_equal(df.astype("int64").astype(dtype), expected1)
tm.assert_frame_equal(df.astype(dtype).astype("float64"), df)
df = pd.DataFrame([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], columns=["a", "b"])
df["b"] = df["b"].astype(dtype)
expected2 = pd.DataFrame(
{"a": [1.0, 3.0, 5.0], "b": integer_array([2, 4, 6], dtype=dtype)}
)
tm.assert_frame_equal(df, expected2)
tm.assert_frame_equal(df.astype(dtype), expected1)
tm.assert_frame_equal(df.astype("int64").astype(dtype), expected1)
@pytest.mark.parametrize("dtype", ["Int64", "Int32", "Int16"])
def test_astype_extension_dtypes_1d(self, dtype):
# GH 22578
df = pd.DataFrame({"a": [1.0, 2.0, 3.0]})
expected1 = pd.DataFrame({"a": integer_array([1, 2, 3], dtype=dtype)})
tm.assert_frame_equal(df.astype(dtype), expected1)
tm.assert_frame_equal(df.astype("int64").astype(dtype), expected1)
df = pd.DataFrame({"a": [1.0, 2.0, 3.0]})
df["a"] = df["a"].astype(dtype)
expected2 = pd.DataFrame({"a": integer_array([1, 2, 3], dtype=dtype)})
tm.assert_frame_equal(df, expected2)
tm.assert_frame_equal(df.astype(dtype), expected1)
tm.assert_frame_equal(df.astype("int64").astype(dtype), expected1)
@pytest.mark.parametrize("dtype", ["category", "Int64"])
def test_astype_extension_dtypes_duplicate_col(self, dtype):
# GH 24704
a1 = Series([0, np.nan, 4], name="a")
a2 = Series([np.nan, 3, 5], name="a")
df = concat([a1, a2], axis=1)
result = df.astype(dtype)
expected = concat([a1.astype(dtype), a2.astype(dtype)], axis=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"dtype", [{100: "float64", 200: "uint64"}, "category", "float64"]
)
def test_astype_column_metadata(self, dtype):
# GH 19920
columns = pd.UInt64Index([100, 200, 300], name="foo")
df = DataFrame(np.arange(15).reshape(5, 3), columns=columns)
df = df.astype(dtype)
tm.assert_index_equal(df.columns, columns)
@pytest.mark.parametrize("dtype", ["M8", "m8"])
@pytest.mark.parametrize("unit", ["ns", "us", "ms", "s", "h", "m", "D"])
    def test_astype_from_datetimelike_to_object(self, dtype, unit):
# tests astype to object dtype
# gh-19223 / gh-12425
dtype = "{}[{}]".format(dtype, unit)
arr = np.array([[1, 2, 3]], dtype=dtype)
df = DataFrame(arr)
result = df.astype(object)
assert (result.dtypes == object).all()
if dtype.startswith("M8"):
assert result.iloc[0, 0] == pd.to_datetime(1, unit=unit)
else:
assert result.iloc[0, 0] == pd.to_timedelta(1, unit=unit)
@pytest.mark.parametrize("arr_dtype", [np.int64, np.float64])
@pytest.mark.parametrize("dtype", ["M8", "m8"])
@pytest.mark.parametrize("unit", ["ns", "us", "ms", "s", "h", "m", "D"])
def test_astype_to_datetimelike_unit(self, arr_dtype, dtype, unit):
# tests all units from numeric origination
# gh-19223 / gh-12425
dtype = "{}[{}]".format(dtype, unit)
arr = np.array([[1, 2, 3]], dtype=arr_dtype)
df = DataFrame(arr)
result = df.astype(dtype)
expected = DataFrame(arr.astype(dtype))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("unit", ["ns", "us", "ms", "s", "h", "m", "D"])
def test_astype_to_datetime_unit(self, unit):
# tests all units from datetime origination
# gh-19223
dtype = "M8[{}]".format(unit)
arr = np.array([[1, 2, 3]], dtype=dtype)
df = DataFrame(arr)
result = df.astype(dtype)
expected = DataFrame(arr.astype(dtype))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("unit", ["ns"])
def test_astype_to_timedelta_unit_ns(self, unit):
        # preserve the timedelta conversion
# gh-19223
dtype = "m8[{}]".format(unit)
arr = np.array([[1, 2, 3]], dtype=dtype)
df = DataFrame(arr)
result = df.astype(dtype)
expected = DataFrame(arr.astype(dtype))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("unit", ["us", "ms", "s", "h", "m", "D"])
def test_astype_to_timedelta_unit(self, unit):
# coerce to float
# gh-19223
dtype = "m8[{}]".format(unit)
arr = np.array([[1, 2, 3]], dtype=dtype)
df = DataFrame(arr)
result = df.astype(dtype)
expected = DataFrame(df.values.astype(dtype).astype(float))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("unit", ["ns", "us", "ms", "s", "h", "m", "D"])
def test_astype_to_incorrect_datetimelike(self, unit):
# trying to astype a m to a M, or vice-versa
# gh-19224
dtype = "M8[{}]".format(unit)
other = "m8[{}]".format(unit)
df = DataFrame(np.array([[1, 2, 3]], dtype=dtype))
msg = (
r"cannot astype a datetimelike from \[datetime64\[ns\]\] to"
r" \[timedelta64\[{}\]\]"
).format(unit)
with pytest.raises(TypeError, match=msg):
df.astype(other)
msg = (
r"cannot astype a timedelta from \[timedelta64\[ns\]\] to"
r" \[datetime64\[{}\]\]"
).format(unit)
df = DataFrame(np.array([[1, 2, 3]], dtype=other))
with pytest.raises(TypeError, match=msg):
df.astype(dtype)
def test_timedeltas(self):
df = DataFrame(
dict(
A=Series(date_range("2012-1-1", periods=3, freq="D")),
B=Series([timedelta(days=i) for i in range(3)]),
)
)
result = df.dtypes
expected = Series(
[np.dtype("datetime64[ns]"), np.dtype("timedelta64[ns]")], index=list("AB")
)
tm.assert_series_equal(result, expected)
df["C"] = df["A"] + df["B"]
result = df.dtypes
expected = Series(
[
np.dtype("datetime64[ns]"),
np.dtype("timedelta64[ns]"),
np.dtype("datetime64[ns]"),
],
index=list("ABC"),
)
tm.assert_series_equal(result, expected)
# mixed int types
df["D"] = 1
result = df.dtypes
expected = Series(
[
np.dtype("datetime64[ns]"),
np.dtype("timedelta64[ns]"),
np.dtype("datetime64[ns]"),
np.dtype("int64"),
],
index=list("ABCD"),
)
tm.assert_series_equal(result, expected)
def test_arg_for_errors_in_astype(self):
# issue #14878
df = DataFrame([1, 2, 3])
with pytest.raises(ValueError):
df.astype(np.float64, errors=True)
df.astype(np.int8, errors="ignore")
def test_arg_for_errors_in_astype_dictlist(self):
# GH-25905
df = pd.DataFrame(
[
{"a": "1", "b": "16.5%", "c": "test"},
{"a": "2.2", "b": "15.3", "c": "another_test"},
]
)
expected = pd.DataFrame(
[
{"a": 1.0, "b": "16.5%", "c": "test"},
{"a": 2.2, "b": "15.3", "c": "another_test"},
]
)
type_dict = {"a": "float64", "b": "float64", "c": "object"}
result = df.astype(dtype=type_dict, errors="ignore")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"input_vals",
[
([1, 2]),
(["1", "2"]),
(list(pd.date_range("1/1/2011", periods=2, freq="H"))),
(list(pd.date_range("1/1/2011", periods=2, freq="H", tz="US/Eastern"))),
([pd.Interval(left=0, right=5)]),
],
)
def test_constructor_list_str(self, input_vals, string_dtype):
# GH 16605
# Ensure that data elements are converted to strings when
# dtype is str, 'str', or 'U'
result = DataFrame({"A": input_vals}, dtype=string_dtype)
expected = DataFrame({"A": input_vals}).astype({"A": string_dtype})
tm.assert_frame_equal(result, expected)
def test_constructor_list_str_na(self, string_dtype):
result = DataFrame({"A": [1.0, 2.0, None]}, dtype=string_dtype)
expected = DataFrame({"A": ["1.0", "2.0", None]}, dtype=object)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data, expected",
[
# empty
(DataFrame(), True),
# multi-same
(DataFrame({"A": [1, 2], "B": [1, 2]}), True),
# multi-object
(
DataFrame(
{
"A": np.array([1, 2], dtype=object),
"B": np.array(["a", "b"], dtype=object),
}
),
True,
),
# multi-extension
(
DataFrame(
{"A": pd.Categorical(["a", "b"]), "B": pd.Categorical(["a", "b"])}
),
True,
),
# differ types
(DataFrame({"A": [1, 2], "B": [1.0, 2.0]}), False),
# differ sizes
(
DataFrame(
{
"A": np.array([1, 2], dtype=np.int32),
"B": np.array([1, 2], dtype=np.int64),
}
),
False,
),
# multi-extension differ
(
DataFrame(
{"A": pd.Categorical(["a", "b"]), "B": pd.Categorical(["b", "c"])}
),
False,
),
],
)
def test_is_homogeneous_type(self, data, expected):
assert data._is_homogeneous_type is expected
def test_asarray_homogenous(self):
df = pd.DataFrame({"A": pd.Categorical([1, 2]), "B": pd.Categorical([1, 2])})
result = np.asarray(df)
# may change from object in the future
expected = np.array([[1, 1], [2, 2]], dtype="object")
tm.assert_numpy_array_equal(result, expected)
def test_str_to_small_float_conversion_type(self):
# GH 20388
np.random.seed(13)
col_data = [str(np.random.random() * 1e-12) for _ in range(5)]
result = pd.DataFrame(col_data, columns=["A"])
expected = pd.DataFrame(col_data, columns=["A"], dtype=object)
tm.assert_frame_equal(result, expected)
# change the dtype of the elements from object to float one by one
result.loc[result.index, "A"] = [float(x) for x in col_data]
expected = pd.DataFrame(col_data, columns=["A"], dtype=float)
tm.assert_frame_equal(result, expected)
class TestDataFrameDatetimeWithTZ:
def test_interleave(self, timezone_frame):
# interleave with object
result = timezone_frame.assign(D="foo").values
expected = np.array(
[
[
Timestamp("2013-01-01 00:00:00"),
Timestamp("2013-01-02 00:00:00"),
Timestamp("2013-01-03 00:00:00"),
],
[
Timestamp("2013-01-01 00:00:00-0500", tz="US/Eastern"),
pd.NaT,
Timestamp("2013-01-03 00:00:00-0500", tz="US/Eastern"),
],
[
Timestamp("2013-01-01 00:00:00+0100", tz="CET"),
pd.NaT,
Timestamp("2013-01-03 00:00:00+0100", tz="CET"),
],
["foo", "foo", "foo"],
],
dtype=object,
).T
tm.assert_numpy_array_equal(result, expected)
# interleave with only datetime64[ns]
result = timezone_frame.values
expected = np.array(
[
[
Timestamp("2013-01-01 00:00:00"),
Timestamp("2013-01-02 00:00:00"),
Timestamp("2013-01-03 00:00:00"),
],
[
| Timestamp("2013-01-01 00:00:00-0500", tz="US/Eastern") | pandas.Timestamp |
from datetime import datetime, timedelta
from typing import Any
import weakref
import numpy as np
from pandas._libs import index as libindex
from pandas._libs.lib import no_default
from pandas._libs.tslibs import frequencies as libfrequencies, resolution
from pandas._libs.tslibs.parsing import parse_time_string
from pandas._libs.tslibs.period import Period
from pandas._typing import DtypeObj, Label
from pandas.util._decorators import Appender, cache_readonly, doc
from pandas.core.dtypes.common import (
ensure_platform_int,
is_bool_dtype,
is_datetime64_any_dtype,
is_dtype_equal,
is_float,
is_integer,
is_object_dtype,
is_scalar,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import PeriodDtype
from pandas.core.arrays.period import (
PeriodArray,
period_array,
raise_on_incompatible,
validate_dtype_freq,
)
import pandas.core.common as com
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import (
InvalidIndexError,
_index_shared_docs,
ensure_index,
maybe_extract_name,
)
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
from pandas.core.indexes.datetimes import DatetimeIndex, Index
from pandas.core.indexes.extension import inherit_names
from pandas.core.indexes.numeric import Int64Index
from pandas.core.ops import get_op_result_name
from pandas.core.tools.datetimes import DateParseError
from pandas.tseries import frequencies
from pandas.tseries.offsets import DateOffset, Tick
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(dict(target_klass="PeriodIndex or list of Periods"))
# --- Period index sketch
def _new_PeriodIndex(cls, **d):
# GH13277 for unpickling
values = d.pop("data")
if values.dtype == "int64":
freq = d.pop("freq", None)
values = PeriodArray(values, freq=freq)
return cls._simple_new(values, **d)
else:
return cls(values, **d)
@inherit_names(
["strftime", "to_timestamp", "asfreq", "start_time", "end_time"]
+ PeriodArray._field_ops,
PeriodArray,
wrap=True,
)
@inherit_names(["is_leap_year", "freq", "_format_native_types"], PeriodArray)
class PeriodIndex(DatetimeIndexOpsMixin, Int64Index):
"""
Immutable ndarray holding ordinal values indicating regular periods in time.
    Index keys are boxed to Period objects which carry the metadata (e.g.,
    frequency information).
Parameters
----------
data : array-like (1d int np.ndarray or PeriodArray), optional
Optional period-like data to construct index with.
copy : bool
Make a copy of input ndarray.
freq : str or period object, optional
One of pandas period strings or corresponding objects.
year : int, array, or Series, default None
month : int, array, or Series, default None
quarter : int, array, or Series, default None
day : int, array, or Series, default None
hour : int, array, or Series, default None
minute : int, array, or Series, default None
second : int, array, or Series, default None
tz : object, default None
Timezone for converting datetime64 data to Periods.
dtype : str or PeriodDtype, default None
Attributes
----------
day
dayofweek
dayofyear
days_in_month
daysinmonth
end_time
freq
freqstr
hour
is_leap_year
minute
month
quarter
qyear
second
start_time
week
weekday
weekofyear
year
Methods
-------
asfreq
strftime
to_timestamp
See Also
--------
Index : The base pandas Index type.
Period : Represents a period of time.
DatetimeIndex : Index with datetime64 data.
TimedeltaIndex : Index of timedelta64 data.
period_range : Create a fixed-frequency PeriodIndex.
Examples
--------
    >>> idx = pd.PeriodIndex(year=[2000, 2002], quarter=[1, 3])
    >>> idx
    PeriodIndex(['2000Q1', '2002Q3'], dtype='period[Q-DEC]', freq='Q-DEC')
"""
_typ = "periodindex"
_attributes = ["name", "freq"]
# define my properties & methods for delegation
_is_numeric_dtype = False
_infer_as_myclass = True
_data: PeriodArray
freq: DateOffset
_engine_type = libindex.PeriodEngine
_supports_partial_string_indexing = True
# ------------------------------------------------------------------------
# Index Constructors
def __new__(
cls,
data=None,
ordinal=None,
freq=None,
tz=None,
dtype=None,
copy=False,
name=None,
**fields,
):
valid_field_set = {
"year",
"month",
"day",
"quarter",
"hour",
"minute",
"second",
}
if not set(fields).issubset(valid_field_set):
argument = list(set(fields) - valid_field_set)[0]
raise TypeError(f"__new__() got an unexpected keyword argument {argument}")
name = maybe_extract_name(name, data, cls)
if data is None and ordinal is None:
# range-based.
data, freq2 = PeriodArray._generate_range(None, None, None, freq, fields)
            # PeriodArray._generate_range does validation that fields is
# empty when really using the range-based constructor.
freq = freq2
data = PeriodArray(data, freq=freq)
else:
freq = validate_dtype_freq(dtype, freq)
# PeriodIndex allow PeriodIndex(period_index, freq=different)
# Let's not encourage that kind of behavior in PeriodArray.
if freq and isinstance(data, cls) and data.freq != freq:
# TODO: We can do some of these with no-copy / coercion?
# e.g. D -> 2D seems to be OK
data = data.asfreq(freq)
if data is None and ordinal is not None:
# we strangely ignore `ordinal` if data is passed.
ordinal = np.asarray(ordinal, dtype=np.int64)
data = PeriodArray(ordinal, freq)
else:
# don't pass copy here, since we copy later.
data = period_array(data=data, freq=freq)
if copy:
data = data.copy()
return cls._simple_new(data, name=name)
@classmethod
def _simple_new(cls, values: PeriodArray, name: Label = None):
"""
Create a new PeriodIndex.
Parameters
----------
values : PeriodArray
Values that can be converted to a PeriodArray without inference
or coercion.
"""
assert isinstance(values, PeriodArray), type(values)
result = object.__new__(cls)
result._data = values
# For groupby perf. See note in indexes/base about _index_data
result._index_data = values._data
result.name = name
result._cache = {}
result._reset_identity()
return result
# ------------------------------------------------------------------------
# Data
@property
def values(self):
return np.asarray(self)
@property
def _has_complex_internals(self):
# used to avoid libreduction code paths, which raise or require conversion
return True
def _shallow_copy(self, values=None, name: Label = no_default):
name = name if name is not no_default else self.name
cache = self._cache.copy() if values is None else {}
if values is None:
values = self._data
result = self._simple_new(values, name=name)
result._cache = cache
return result
def _maybe_convert_timedelta(self, other):
"""
Convert timedelta-like input to an integer multiple of self.freq
Parameters
----------
other : timedelta, np.timedelta64, DateOffset, int, np.ndarray
Returns
-------
converted : int, np.ndarray[int64]
Raises
------
IncompatibleFrequency : if the input cannot be written as a multiple
of self.freq. Note IncompatibleFrequency subclasses ValueError.
"""
if isinstance(other, (timedelta, np.timedelta64, Tick, np.ndarray)):
offset = frequencies.to_offset(self.freq.rule_code)
if isinstance(offset, Tick):
# _check_timedeltalike_freq_compat will raise if incompatible
delta = self._data._check_timedeltalike_freq_compat(other)
return delta
elif isinstance(other, DateOffset):
freqstr = other.rule_code
base = libfrequencies.get_base_alias(freqstr)
if base == self.freq.rule_code:
return other.n
raise raise_on_incompatible(self, other)
elif is_integer(other):
# integer is passed to .shift via
# _add_datetimelike_methods basically
# but ufunc may pass integer to _add_delta
return other
# raise when input doesn't have freq
raise raise_on_incompatible(self, None)
def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
"""
Can we compare values of the given dtype to our own?
"""
if not isinstance(dtype, PeriodDtype):
return False
return dtype.freq == self.freq
# ------------------------------------------------------------------------
# Rendering Methods
def _mpl_repr(self):
# how to represent ourselves to matplotlib
return self.astype(object)._values
@property
def _formatter_func(self):
return self.array._formatter(boxed=False)
# ------------------------------------------------------------------------
# Indexing
@cache_readonly
def _engine(self):
# To avoid a reference cycle, pass a weakref of self._values to _engine_type.
period = weakref.ref(self._values)
return self._engine_type(period, len(self))
@doc(Index.__contains__)
def __contains__(self, key: Any) -> bool:
if isinstance(key, Period):
if key.freq != self.freq:
return False
else:
return key.ordinal in self._engine
else:
hash(key)
try:
self.get_loc(key)
return True
except KeyError:
return False
@cache_readonly
def _int64index(self) -> Int64Index:
return Int64Index._simple_new(self.asi8, name=self.name)
# ------------------------------------------------------------------------
# Index Methods
def __array_wrap__(self, result, context=None):
"""
Gets called after a ufunc. Needs additional handling as
PeriodIndex stores internal data as int dtype
Replace this to __numpy_ufunc__ in future version
"""
if isinstance(context, tuple) and len(context) > 0:
func = context[0]
if func is np.add:
pass
elif func is np.subtract:
name = self.name
left = context[1][0]
right = context[1][1]
if isinstance(left, PeriodIndex) and isinstance(right, PeriodIndex):
name = left.name if left.name == right.name else None
return Index(result, name=name)
elif isinstance(left, Period) or isinstance(right, Period):
return Index(result, name=name)
elif isinstance(func, np.ufunc):
if "M->M" not in func.types:
msg = f"ufunc '{func.__name__}' not supported for the PeriodIndex"
# This should be TypeError, but TypeError cannot be raised
# from here because numpy catches.
raise ValueError(msg)
if is_bool_dtype(result):
return result
# the result is object dtype array of Period
# cannot pass _simple_new as it is
return type(self)(result, freq=self.freq, name=self.name)
def asof_locs(self, where, mask: np.ndarray) -> np.ndarray:
"""
where : array of timestamps
mask : array of booleans where data is not NA
"""
where_idx = where
if isinstance(where_idx, DatetimeIndex):
where_idx = PeriodIndex(where_idx._values, freq=self.freq)
elif not isinstance(where_idx, PeriodIndex):
raise TypeError("asof_locs `where` must be DatetimeIndex or PeriodIndex")
elif where_idx.freq != self.freq:
raise raise_on_incompatible(self, where_idx)
locs = self.asi8[mask].searchsorted(where_idx.asi8, side="right")
locs = np.where(locs > 0, locs - 1, 0)
result = np.arange(len(self))[mask].take(locs)
first = mask.argmax()
result[(locs == 0) & (where_idx.asi8 < self.asi8[first])] = -1
return result
@doc(Index.astype)
def astype(self, dtype, copy=True, how="start"):
dtype = pandas_dtype(dtype)
if is_datetime64_any_dtype(dtype):
# 'how' is index-specific, isn't part of the EA interface.
tz = getattr(dtype, "tz", None)
return self.to_timestamp(how=how).tz_localize(tz)
# TODO: should probably raise on `how` here, so we don't ignore it.
return super().astype(dtype, copy=copy)
@property
def is_full(self) -> bool:
"""
Returns True if this PeriodIndex is range-like in that all Periods
between start and end are present, in order.
"""
if len(self) == 0:
return True
if not self.is_monotonic:
raise ValueError("Index is not monotonic")
values = self.asi8
return ((values[1:] - values[:-1]) < 2).all()
@property
def inferred_type(self) -> str:
# b/c data is represented as ints make sure we can't have ambiguous
# indexing
return "period"
@Appender(_index_shared_docs["get_indexer"] % _index_doc_kwargs)
def get_indexer(self, target, method=None, limit=None, tolerance=None):
target = ensure_index(target)
if isinstance(target, PeriodIndex):
if target.freq != self.freq:
# No matches
no_matches = -1 * np.ones(self.shape, dtype=np.intp)
return no_matches
target = target.asi8
self_index = self._int64index
else:
self_index = self
if tolerance is not None:
tolerance = self._convert_tolerance(tolerance, target)
if self_index is not self:
# convert tolerance to i8
tolerance = self._maybe_convert_timedelta(tolerance)
return Index.get_indexer(self_index, target, method, limit, tolerance)
@Appender(_index_shared_docs["get_indexer_non_unique"] % _index_doc_kwargs)
def get_indexer_non_unique(self, target):
target = ensure_index(target)
if not self._is_comparable_dtype(target.dtype):
no_matches = -1 * np.ones(self.shape, dtype=np.intp)
return no_matches, no_matches
target = target.asi8
indexer, missing = self._int64index.get_indexer_non_unique(target)
return ensure_platform_int(indexer), missing
def get_loc(self, key, method=None, tolerance=None):
"""
Get integer location for requested label.
Parameters
----------
key : Period, NaT, str, or datetime
String or datetime key must be parseable as Period.
Returns
-------
loc : int or ndarray[int64]
Raises
------
KeyError
Key is not present in the index.
TypeError
If key is listlike or otherwise not hashable.
"""
orig_key = key
if not is_scalar(key):
raise InvalidIndexError(key)
if isinstance(key, str):
try:
loc = self._get_string_slice(key)
return loc
except (TypeError, ValueError):
pass
try:
asdt, reso = parse_time_string(key, self.freq)
except DateParseError as err:
# A string with invalid format
raise KeyError(f"Cannot interpret '{key}' as period") from err
grp = resolution.Resolution.get_freq_group(reso)
freqn = resolution.get_freq_group(self.freq)
# _get_string_slice will handle cases where grp < freqn
assert grp >= freqn
if grp == freqn:
key = Period(asdt, freq=self.freq)
loc = self.get_loc(key, method=method, tolerance=tolerance)
return loc
elif method is None:
raise KeyError(key)
else:
key = asdt
elif is_integer(key):
            # Period constructor will cast to string, which we don't want
raise KeyError(key)
try:
key = Period(key, freq=self.freq)
except ValueError as err:
# we cannot construct the Period
raise KeyError(orig_key) from err
try:
return Index.get_loc(self, key, method, tolerance)
except KeyError as err:
raise KeyError(orig_key) from err
def _maybe_cast_slice_bound(self, label, side: str, kind: str):
"""
If label is a string or a datetime, cast it to Period.ordinal according
to resolution.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'loc', 'getitem'}
Returns
-------
bound : Period or object
Notes
-----
Value of `side` parameter should be validated in caller.
"""
assert kind in ["loc", "getitem"]
if isinstance(label, datetime):
return Period(label, freq=self.freq)
elif isinstance(label, str):
try:
parsed, reso = parse_time_string(label, self.freq)
bounds = self._parsed_string_to_bounds(reso, parsed)
return bounds[0 if side == "left" else 1]
except ValueError as err:
# string cannot be parsed as datetime-like
# TODO: we need tests for this case
raise KeyError(label) from err
elif is_integer(label) or is_float(label):
self._invalid_indexer("slice", label)
return label
def _parsed_string_to_bounds(self, reso: str, parsed: datetime):
if reso not in ["year", "month", "quarter", "day", "hour", "minute", "second"]:
raise KeyError(reso)
grp = resolution.Resolution.get_freq_group(reso)
iv = Period(parsed, freq=(grp, 1))
return (iv.asfreq(self.freq, how="start"), iv.asfreq(self.freq, how="end"))
def _validate_partial_date_slice(self, reso: str):
grp = resolution.Resolution.get_freq_group(reso)
freqn = resolution.get_freq_group(self.freq)
if not grp < freqn:
# TODO: we used to also check for
# reso in ["day", "hour", "minute", "second"]
# why is that check not needed?
raise ValueError
def _get_string_slice(self, key: str, use_lhs: bool = True, use_rhs: bool = True):
# TODO: Check for non-True use_lhs/use_rhs
parsed, reso = parse_time_string(key, self.freq)
try:
return self._partial_date_slice(reso, parsed, use_lhs, use_rhs)
except KeyError as err:
raise KeyError(key) from err
def insert(self, loc, item):
if not isinstance(item, Period) or self.freq != item.freq:
return self.astype(object).insert(loc, item)
i8result = np.concatenate(
(self[:loc].asi8, np.array([item.ordinal]), self[loc:].asi8)
)
arr = type(self._data)._simple_new(i8result, dtype=self.dtype)
return type(self)._simple_new(arr, name=self.name)
def join(self, other, how="left", level=None, return_indexers=False, sort=False):
"""
See Index.join
"""
self._assert_can_do_setop(other)
if not isinstance(other, PeriodIndex):
return self.astype(object).join(
other, how=how, level=level, return_indexers=return_indexers, sort=sort
)
result = Int64Index.join(
self,
other,
how=how,
level=level,
return_indexers=return_indexers,
sort=sort,
)
if return_indexers:
result, lidx, ridx = result
return self._apply_meta(result), lidx, ridx
return self._apply_meta(result)
# ------------------------------------------------------------------------
# Set Operation Methods
def _assert_can_do_setop(self, other):
super()._assert_can_do_setop(other)
# *Can't* use PeriodIndexes of different freqs
# *Can* use PeriodIndex/DatetimeIndex
if isinstance(other, PeriodIndex) and self.freq != other.freq:
raise raise_on_incompatible(self, other)
def _setop(self, other, sort, opname: str):
"""
Perform a set operation by dispatching to the Int64Index implementation.
"""
self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
res_name = get_op_result_name(self, other)
other = ensure_index(other)
i8self = Int64Index._simple_new(self.asi8)
i8other = Int64Index._simple_new(other.asi8)
i8result = getattr(i8self, opname)(i8other, sort=sort)
parr = type(self._data)(np.asarray(i8result, dtype=np.int64), dtype=self.dtype)
result = type(self)._simple_new(parr, name=res_name)
return result
def intersection(self, other, sort=False):
self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
other = ensure_index(other)
if self.equals(other):
return self._get_reconciled_name_object(other)
if not is_dtype_equal(self.dtype, other.dtype):
# TODO: fastpath for if we have a different PeriodDtype
this = self.astype("O")
other = other.astype("O")
return this.intersection(other, sort=sort)
return self._setop(other, sort, opname="intersection")
def difference(self, other, sort=None):
self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
other = ensure_index(other)
if self.equals(other):
# pass an empty PeriodArray with the appropriate dtype
return type(self)._simple_new(self._data[:0], name=self.name)
if is_object_dtype(other):
return self.astype(object).difference(other).astype(self.dtype)
elif not is_dtype_equal(self.dtype, other.dtype):
return self
return self._setop(other, sort, opname="difference")
def _union(self, other, sort):
if not len(other) or self.equals(other) or not len(self):
return super()._union(other, sort=sort)
# We are called by `union`, which is responsible for this validation
assert isinstance(other, type(self))
if not is_dtype_equal(self.dtype, other.dtype):
this = self.astype("O")
other = other.astype("O")
return this._union(other, sort=sort)
return self._setop(other, sort, opname="_union")
# ------------------------------------------------------------------------
def _apply_meta(self, rawarr) -> "PeriodIndex":
if not isinstance(rawarr, PeriodIndex):
if not isinstance(rawarr, PeriodArray):
rawarr = | PeriodArray(rawarr, freq=self.freq) | pandas.core.arrays.period.PeriodArray |
"""
This library contains a set of functions that help you detect similar
images in your archive. It will detect the 'best' image per group using a
high-pass filter and copy these to another folder.
Thus, you do not have to pre-select images for your photo show yourself
(content itself is not considered a quality criterion).
"""
from functools import partial
import glob
import os
import shutil
import cv2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scipy.stats as sts
from sklearn.linear_model import LogisticRegression
version = '0.1.3'
def split_filepath(file, split=False):
"""
Function to split file from path and return either of them.
Parameters:
------------------------------
file: (str), full path to file
    split: (bool), if True, return (path, filename) instead of a single string
    Returns:
    ------------------------------
    str or tuple, cleaned path/filename, or (path, filename) if split is True
"""
file = file.replace('\\', '/')
if split is True:
if os.path.isfile(file):
_ = file.split('/')
            return '/'.join(_[:-1]), _[-1]
else:
if os.path.exists(file):
                if file[-1] != '/':
file = file + '/'
return file
else:
print('The file/path {} does not exist.'.format(file))
else:
return file
def read_files(path, ext):
"""
Function to read image filenames from a directory and their creation date.
Parameters:
------------------------------
path: (str), filename
ext: (str), extension for files to be considered
Returns:
------------------------------
list, Filenames
"""
files = glob.glob('{}*.{}'.format(path, ext))
# file modification date seems to be more reliable
files = [(split_filepath(f, split=True)[-1], os.stat(f).st_mtime) for f in files]
#files = [(split_filepath(f, split=True)[-1], os.path.getctime(f)) for f in files]
return files
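# Usage sketch (illustrative, not part of the original library API): list all
# JPG files of an archive together with their modification timestamps. The
# folder './images/' is an assumed example location and must end with '/'.
#
#   files = read_files('./images/', 'jpg')
#   # -> [('IMG_001.jpg', 1581342516.0), ('IMG_002.jpg', 1581342519.0), ...]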
def read_img(file, read_type=None):
"""
Function to read images.
Parameters:
------------------------------
file: (str), filename
read_type: (str), different opencv conversions of images
Returns:
------------------------------
array, Image
"""
if read_type == None:
return cv2.imread(file)
elif read_type == 'rgb':
return cv2.cvtColor(cv2.imread(file), cv2.COLOR_BGR2RGB)
elif read_type == 'hsv':
return cv2.cvtColor(cv2.imread(file), cv2.COLOR_BGR2HSV)
elif read_type == 'gray':
return cv2.cvtColor(cv2.imread(file), cv2.COLOR_BGR2GRAY)
def resize_img(img, scale=0.2):
"""
Function resizes image.
Parameters:
------------------------------
img: (array), image
scale: (float), factor used for reduction (1.0 means no change)
Returns:
------------------------------
array, Image
"""
x, y = int(img.shape[0]*scale), int(img.shape[1]*scale)
return cv2.resize(img, (y, x))
def calculate_hist(img, channels=[0], mask=None, histSize=[256], ranges=[0, 256]):
"""
Function to calculate histograms for image data.
Refer to cv2.calcHist of opencv library for more information.
Returns:
------------------------------
array, Histogramm of Image
"""
    return cv2.calcHist([img], channels=channels, mask=mask, histSize=histSize, ranges=ranges)
def minimize_image(img, min_pix=8):
"""
Function that resizes images to icon size, leaving only the bare silhouette of the image.
Parameters:
------------------------------
img: (array), input image
min_pix: (int, tuple), min value for the min axis
Returns:
------------------------------
array, Resized image.
"""
if len(img.shape) == 2:
y, x = img.shape
elif len(img.shape) == 3:
y, x, _ = img.shape
    if type(min_pix) == int:
        if x <= y:
            y = int(y/x * min_pix)
            x = min_pix
        else:
            x = int(x/y * min_pix)
            y = min_pix
elif type(min_pix) == tuple:
x, y = min_pix
return cv2.resize(img, (x, y))
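# Illustrative sketch (assumes a file 'photo.jpg' exists): shrink an image to
# its 8-px "icon" representation, which is the basis for the hashing functions
# further below.
#
#   img_gray = read_img('photo.jpg', read_type='gray')
#   icon = minimize_image(img_gray, min_pix=8)   # smallest axis becomes 8 px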
def copy_images(df, path=('./images/', './processed/'), rank_col=None, crit_col=None):
"""
Function that copies the processed images to the destination repository.
Parameters:
------------------------------
df: (pandas DataFrame), that is grouped by column rank_col
path = (tuple, str), (input_path, output_path) of images
rank_col: (str), ranked column in the DataFrame
crit_col: (str), the column containing the criteria, greater
values are better
Returns:
------------------------------
None
"""
for cur_file in df.loc[df.groupby([rank_col])[crit_col].idxmax(), 'file'].values:
shutil.copy2(path[0] + cur_file, path[1] + cur_file)
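# Usage sketch (hedged): given a DataFrame with one row per image, a 'file'
# column with the file names, a group column 'rank' and a sharpness column
# 'sharpness', copy the sharpest image of every group from './images/' to
# './processed/'. Column names and paths are assumptions for illustration.
#
#   copy_images(df, path=('./images/', './processed/'),
#               rank_col='rank', crit_col='sharpness')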
def rotate_img(img, degree=0):
"""
Function that rotates images.
Parameters:
------------------------------
img = (np.array), input image
degree = (float), expected rotation degree (between 0° and 360°)
Returns:
------------------------------
array, Rotated image.
"""
rows, cols = img.shape
M_rot = cv2.getRotationMatrix2D((cols/2,rows/2), degree, 1)
img_rot = cv2.warpAffine(img, M_rot, (cols,rows),
flags=cv2.INTER_NEAREST, borderMode=cv2.BORDER_REPLICATE)
return img_rot
def warp_img(img, scale=0.0, how=None):
"""
Function that warps images.
Parameters:
------------------------------
img = (np.array), input image
scale = (float), needs to be in [0, 0.5), defines how much the image axis
is stretched
how = (str), stretched axis, has to be in ("bottom", "top", "left", "right")
Returns:
------------------------------
array, Warped image.
"""
rows, cols = img.shape
if how == 'bottom':
pinp = np.float32([[rows*scale,0],[rows*(1 - scale),0],[0,cols],[rows,cols]]) # squeeze bottom x
elif how == 'top':
pinp = np.float32([[0,0],[rows,0],[rows*scale,cols],[rows*(1 - scale),cols]]) # squeeze top x
elif how == 'left':
pinp = np.float32([[0,cols*scale],[rows,0],[0,cols*(1-scale)],[rows,cols]]) # squeeze left side
elif how == 'right':
pinp = np.float32([[0,0],[rows,cols*scale],[0,cols],[rows,cols*(1-scale)]]) # squeeze right side
else:
print('Parameter how has to be in "bottom", "top", "left", "right".')
pout = np.float32([[0,0], [rows,0], [0,cols], [rows,cols]])
M_warp = cv2.getPerspectiveTransform(pinp, pout)
img_warp = cv2.warpPerspective(img, M_warp, (cols,rows),
flags=cv2.INTER_NEAREST, borderMode=cv2.BORDER_REPLICATE)
return img_warp
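# Illustrative sketch: generate slightly rotated and warped variants of a
# grayscale image, as done internally by compare_hashes_adv to make the hash
# comparison robust against small perspective changes.
#
#   variants = [rotate_img(img_gray, deg) for deg in range(-5, 6, 1)]
#   variants += [warp_img(img_gray, 0.03, how=side)
#                for side in ('left', 'right', 'top', 'bottom')]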
def compare_hashes_adv(images, hash_dim=(8, 8), range_deg=(-5, 5, 1),
warping=('left', 'right', 'top', 'bottom'),
range_warp=(0.01, 0.06, 0.01), return_hash_only=False):
"""
Advanced function that compares the hashes of consecutive images and takes
small variations into account.
Parameters:
------------------------------
images = (list), list of images (arrays)
hash_dim = (tuple), expected x & y pixel of the hashed images
range_deg = (tuple), (min_value, max_value, step) for image rotation
warping = (tuple), how to warp image, choose and combine ('left', 'right', 'top', 'bottom')
range_warp = (tuple), (min_value, max_value, step) for image warping,
min/max_values have to be in [0, 0.5)
return_hash_only = (bool), if True, only true image hash is calculated
Returns:
------------------------------
List of hash similarity of consecutive images.
"""
def hash_checker(img_a, img_b):
return sum([1 if i[0] == i[1] else 0 for i in zip(img_a, img_b)])
if return_hash_only is True:
images = [minimize_image(img, min_pix=hash_dim) for img in images]
img_mean = [np.mean(img) for img in images]
imgs_reshaped = [img.reshape(-1) for img in images]
hashes = []
for enum, cur_img in enumerate(imgs_reshaped):
hashes.append([1 if px > img_mean[enum] else 0 for px in cur_img])
compared_hashes = [hash_checker(*pair) for pair in zip(hashes, hashes[1:] + hashes[:1])]
else:
images_adv = []
for img in images:
images_ = [img]
images_ += [rotate_img(img, deg) for deg in range(*range_deg)]
images_ += [warp_img(img, scale, how=how) for how in warping
for scale in np.arange(*range_warp)]
images_adv.append([minimize_image(img, min_pix=hash_dim).reshape(-1) for img in images_])
img_mean = [np.mean(img) for images in images_adv for img in images]
ix, iy = len(images), len(images_adv[0])
img_mean = np.array(img_mean).reshape(ix, iy)
hashes = []
for enum, cur_img in enumerate(images_adv):
var = []
for enum2, variant in enumerate(cur_img):
var.append([1 if px > img_mean[enum][enum2] else 0 for px in variant])
hashes.append(var)
compared_hashes = []
for pair in zip(hashes, hashes[1:] + hashes[:1]):
a, b = pair
max_hash = 0
for img_b in b:
cur_hash = hash_checker(a[0], img_b)
if cur_hash > max_hash:
max_hash = cur_hash
compared_hashes.append(max_hash)
return compared_hashes
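# Usage sketch (hedged): compare consecutive grayscale images of a sorted
# archive. With hash_dim=(8, 8) the maximum score is 64; values close to 64
# indicate near-identical shots. `file_list` is an assumed list of file paths.
#
#   gray_imgs = [read_img(f, read_type='gray') for f in file_list]
#   hash_scores = compare_hashes_adv(gray_imgs, hash_dim=(8, 8),
#                                    return_hash_only=True)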
def high_pass_filter(img, x_shift=30, y_shift=30):
"""
High-pass filter for images, calculates the magnitude spectrum.
Parameters:
------------------------------
img: (array), two dimensional image data
x_shift, y_shift: (int), filter threshold, 0 means no filtering at all
Returns:
------------------------------
    float, Cumulated magnitude spectrum of the image (used as sharpness score)
"""
rows, cols = img.shape
_row, _col = int(rows/2), int(cols/2)
f = np.fft.fft2(img)
fshift = np.fft.fftshift(f)
fshift[_row - y_shift:_row + y_shift, _col - x_shift:_col + x_shift] = 0 # here happens the filtering
f_ishift = np.fft.ifftshift(fshift)
img_back = np.fft.ifft2(f_ishift)
img_back = np.abs(img_back)
flattened_spectrum = img_back.reshape(-1, 1)
x_hist, y_hist = np.histogram(flattened_spectrum, bins=255)
y_hist_corr = [(y_hist[i] + y_hist[i+1])/2.0 for i in range(len(y_hist)-1)]
mag_spectrum = np.cumsum(y_hist_corr)[-1]
return mag_spectrum
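# Illustrative sketch: the magnitude spectrum acts as a simple sharpness score,
# so within a group of similar images the one with the largest value can be
# treated as the 'best' shot. `gray_imgs` is assumed to hold grayscale images.
#
#   sharpness = [high_pass_filter(img, x_shift=30, y_shift=30) for img in gray_imgs]
#   best_idx = int(np.argmax(sharpness))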
def calculate_performance(labels, predictions, n_img=None):
"""
Returns performance summary for ranking evaluation - can be used for calibration.
    Has two contributions: the number of true image groups that were recovered
    and the achieved reduction of the image set.
Parameters:
------------------------------
labels = (series, array), true labels
predictions = (series, array), predicted labels
n_img = (int), true number of groups
Returns:
------------------------------
tuple, (image groups found,
unique image groups found,
true number of image groups
normalized reduction ratio (1 is best ))
"""
df_perf = pd.DataFrame(np.c_[labels, predictions], columns=['labels', 'pred'])
df_grouped = df_perf.groupby(['pred'])['labels'].apply(lambda x: len(set(x))).to_frame()
groups_found = min(n_img, len(set(df_grouped.index[df_grouped['labels']==1].values)))
groups_unique = len(df_perf['pred'].unique())
max_red = n_img / len(df_perf)
reduction = groups_unique / len(df_perf) * groups_found / n_img
reduction /= max_red
return (groups_found,
groups_unique,
n_img,
reduction)
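# Usage sketch (assumes hand-labelled group ids in `labels` and predicted group
# ids in `pred` for an archive that truly contains 12 groups):
#
#   found, unique, true_n, reduction = calculate_performance(labels, pred, n_img=12)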
def calc_correlations(images, method):
"""
Function to calculate correlations between images.
Parameters:
------------------------------
images: list, of images read in HSV mode
    method: str, 'bhattacharyya' or 'correl'; refer to cv2 for further information
Returns:
------------------------------
List of calculated correlation measures.
"""
methods = {'bhattacharyya': cv2.HISTCMP_BHATTACHARYYA,
'correl': cv2.HISTCMP_CORREL,
}
img_hists = [calculate_hist(cur_img, [0,1], None, [180, 256], [0,180, 0,256]) for cur_img in images]
cors = [cv2.compareHist(img_hists[i-1], img_hists[i], methods[method]) for i in range(len(img_hists))]
cors.append(cors.pop(0))
if method == 'bhattacharyya':
cors = list(map(lambda x: 1 - x, cors))
return cors
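# Illustrative sketch: histogram correlation of consecutive HSV images; both
# methods are rescaled so that larger values mean more similar images.
# `file_list` is an assumed list of file paths.
#
#   hsv_imgs = [read_img(f, read_type='hsv') for f in file_list]
#   cor_scores = calc_correlations(hsv_imgs, method='correl')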
def timelag_ranker(series, max_lag=5, max_per_group=5):
def timelag_ranker_(series, max_lag, max_per_group):
"""
        Ranker function, distinguishes images based on the time lag between consecutive shots.
        Parameters:
        ------------------------------
        series = pandas Series of time differences between consecutive images (in seconds)
        max_lag = (float), maximum time lag within one group (in seconds)
        max_per_group = (int), maximum number of images per group
Returns:
------------------------------
Generator of Rank
"""
rank = 1
n_group = 0
for row in series.iloc[:-1]:
yield rank
n_group += 1
if row > max_lag:
rank += 1
n_group = 0
elif n_group > max_per_group:
rank += 1
n_group = 0
yield rank
return list(timelag_ranker_(series, max_lag, max_per_group))
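# Usage sketch (hedged): the ranker functions are meant to be applied to a
# DataFrame that holds one comparison value per image; the column names
# 'creation_date', 'time_diff' and 'rank_time' are assumptions for illustration.
#
#   df['time_diff'] = df['creation_date'].diff().shift(-1).fillna(0)
#   df['rank_time'] = timelag_ranker(df['time_diff'], max_lag=5, max_per_group=5)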
def hash_ranker(series, dim=None, limit=0.875):
def hash_ranker_(series, dim, limit):
"""
        Ranker function, distinguishes images based on similarity of image hashes.
        Hash refers here to low resolution images (~8 px per x and y dimensions).
        For advanced hashing, a higher limit is recommended, e.g., limit ~= 0.925.
        Parameters:
        ------------------------------
        series = pandas Series, containing image hash values
        dim = (int or tuple), hash dimensions in px
        limit = (float), lower limit for hash similarity to be recognized
        as similar images
Returns:
------------------------------
Generator of Rank
"""
rank = 1
for row in series.iloc[:-1]:
yield rank
if row < np.product(dim) * limit:
rank += 1
yield rank
return list(hash_ranker_(series, dim, limit))
def corr_ranker(series, limits={'bhattacharyya': 0.61, 'correl': 0.8}):
    def corr_ranker_(series, limits):
"""
Ranker function, distinguishes images based on correlation of image (has to be provided).
Parameters:
------------------------------
series = pandas Series, containing image hash values
limits = (dict), where keys refer to the method used and the values
are the lower bounds for recognition as similar images.
Returns:
------------------------------
Generator of Rank
"""
name = series.name.split('_')[0]
rank = 1
for row in series.iloc[:-1]:
yield rank
if row < limits[name]:
rank += 1
yield rank
    return list(corr_ranker_(series, limits))
def vote_ranker(series, limit=0.41):
def vote_ranker_(series, limit):
"""
Ranker function, mean of differences of ranks (from different methods)
serves as discriminator of ranks from ensemble.
Parameters:
------------------------------
        series = pandas Series, containing the per-image mean of the rank comparisons
        limit = (float), has to be in [0, 1]
Returns:
------------------------------
Generator of Rank
"""
rank = 1
for row in series.iloc[:-1]:
yield rank
if row < limit:
rank += 1
yield rank
return list(vote_ranker_(series, limit))
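# Illustrative sketch of the ensemble idea: average how often the individual
# rankers agree that two consecutive images belong together, then cut groups
# with vote_ranker. The rank columns and the exact aggregation below are one
# possible construction, not prescribed by the library.
#
#   rank_cols = ['rank_time', 'rank_hash', 'rank_correl']
#   df['vote'] = 1 - df[rank_cols].diff(-1).abs().clip(0, 1).mean(axis=1)
#   df['rank_final'] = vote_ranker(df['vote'], limit=0.41)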
def calculate_axis_ratio(img):
"""
Function returns axis-ratio of image.
Parameters:
------------------------------
img: (array), input image
Returns:
------------------------------
float, Axis ratio (horizontal/vertical)
"""
if len(img.shape) == 2:
h, v = img.shape
elif len(img.shape) == 3:
h, v, _ = img.shape
else:
h, v = img.shape[0], img.shape[1]
return h/v
def img_shape_ranker(series, limit=0.01):
def img_shape_ranker_(series, limit):
"""
        Ranker function, checks if the image axis ratio changes (horizontal to vertical
        and vice versa).
        Parameters:
        ------------------------------
        series = pandas Series, containing differences of consecutive image axis ratios
        limit = (float), has to be in (0, np.inf]
Returns:
------------------------------
Generator of Rank
"""
rank = 1
for row in series.iloc[:-1]:
yield rank
if abs(row) >= limit:
rank += 1
yield rank
return list(img_shape_ranker_(series, limit))
def batch_hashing(df, n_dims=(8, 64)):
"""
Function that calculates the image hashes for different image dimensions.
Parameters:
------------------------------
df: (pandas DataFrame), dataframe containing at least the following columns:
target, gray_images
n_dims: (tuple, int), (min_value, max_value) for quadratic image hashes
Returns:
------------------------------
list of pandas DataFrames
"""
df = df.copy()
targets = np.arange(0, len(df['target'].unique()))
runs = []
for i in range(*n_dims, 1):
df['hash_value'] = compare_hashes_adv(df['gray_images'], hash_dim=(i, i),
return_hash_only=True)
df['hash_value'] /= (i*i)
runs.append(df[['target', 'hash_value']].copy())
return runs
def return_hashing_dist(data, _type=None, target_col=None, comp_col=None):
"""
Function that calculates the hash values for (first) similar & non-similar images
Parameters:
------------------------------
data: (list of pandas DataFrame), each dataframe is expected to contain
at least the following columns:
target, hash_value
_type: (str), choose between 'similar' and 'nonsimilar'
target_col: (str), column name of the target column
comp_col: (str), column name of the column used for comparison
Returns:
------------------------------
    list of pandas Series (similar) or numpy array (nonsimilar)
"""
if _type == 'similar':
similar = []
for cur_res in data:
#TODO: calculate mean of element [0,n-1] if group size > 2
rel_rows = (cur_res.groupby([target_col])[comp_col].agg('count') > 1).values
similar.append(cur_res.groupby([target_col])[comp_col].first().iloc[rel_rows])
return similar
elif _type == 'nonsimilar':
nonsimilar = np.array([cur_res.groupby([target_col])[comp_col].last().values
for cur_res in data])
return nonsimilar
def make_logreg_fit(similar, nonsimilar, make_plot=True,
labels=('x', 'Probability'), limits=None):
"""
Function that performs a logistic regression fit on the groups of similar
and non-similar images and provides the threshold value (+ plot).
Parameters:
------------------------------
similar: (array), containing the hash values found
for similar groups
nonsimilar: (array), containing the hash values found
for non-similar groups
make_plot: (bool), create plot if True
labels: (tuple), x- and y-label for the plot
limits: (tuple), (min_x, max_x) for plots if not None
Returns:
------------------------------
float, threshold.
"""
X_train = np.append(nonsimilar, similar)
X_train = np.c_[X_train, np.ones(len(X_train))]
y_train = np.array(len(nonsimilar)*[0] + len(similar)*[1])
if limits is None:
min_val, max_val = min(X_train[:,0]), max(X_train[:,0])
else:
min_val, max_val = limits
lreg = LogisticRegression()
lreg.fit(X_train, y_train)
x_vals = np.arange(min_val*0.8, max_val*1.2, 0.001)
probs = lreg.predict_proba(np.c_[x_vals, np.ones(len(x_vals))])
lower_bound = np.argmax(probs[probs<=0.50])
if make_plot is True:
fig, axs = plt.subplots(2, 1, figsize=(15, 10), sharex=True,
gridspec_kw={'height_ratios': [5, 1]},
tight_layout=True)
for paxis in range(2):
axs[paxis].plot([x_vals[lower_bound], x_vals[lower_bound]], [-0.1, 1.1], '-',
color='gray', alpha=0.5, lw=10)
axs[0].plot(x_vals, probs[:,1], 'b-', label='Probability curve', alpha=0.5)
axs[0].scatter(X_train[y_train==0, 0], lreg.predict_proba(X_train)[y_train==0, 1],
marker='.', s=400, ec='gray', label='False', color='red', alpha=.8)
axs[0].scatter(X_train[y_train==1, 0], lreg.predict_proba(X_train)[y_train==1, 1],
marker='.', s=400, ec='gray', label='True', color='green', alpha=.8)
axs[1].eventplot(similar, lineoffsets=[0.2], linelengths=0.4, linewidths=0.5,
orientation='horizontal', color='green', alpha=0.5)
axs[1].eventplot(nonsimilar, lineoffsets=[0.75], linelengths=0.4, linewidths=0.5,
orientation='horizontal', color='red', alpha=0.5)
axs[0].axis([min_val*0.8, max_val*1.1, -0.1, 1.1])
axs[0].grid()
axs[0].legend(loc='upper left')
xl, yl = labels
axs[1].set_xlabel(xl)
axs[0].set_ylabel(yl)
axs[0].set_title('Logistic regression fit')
axs[1].axis([min_val*0.9, max_val*1.1, -0.1, 1.1])
axs[1].set_ylabel('Event')
axs[1].tick_params(labelleft=False)
axs[1].grid()
plt.show()
threshold = x_vals[lower_bound]
print('Limit at {:.2f}'.format(threshold))
return threshold
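# Calibration sketch (hedged): bootstrap the archive, split the comparison
# values into similar / non-similar groups and fit a threshold. The column
# names follow the conventions used in bootstrap_data / return_dist below.
#
#   runs = bootstrap_data(df, n_runs=10)
#   sim = return_dist(runs, _type='similar', target_col='target', comp_col='hash_value')
#   non = return_dist(runs, _type='nonsimilar', target_col='target', comp_col='hash_value')
#   limit = make_logreg_fit(sim, non, labels=('hash similarity', 'Probability'))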
def bootstrap_data(df, n_runs=10):
"""
This function shuffles the input data and repeatedly calculates the
ranks using a variety of methods. This provides better estimates on
the average values for both image groups (similar or non-similar).
Parameters:
------------------------------
df: (pandas DataFrame), dataframe containing at least the following columns:
target, creation_date, gray_images, hsv_images, rgb_images
n_runs: (int), how often the experiment is repeated
Returns:
------------------------------
list of pandas DataFrames
"""
true_targets = df['target'].copy()
targets = np.arange(0, len(true_targets.unique()))
#TODO: replace methods with function call
methods = {'avg_hash': [cv2.img_hash.averageHash, 8],
'block_hash': [partial(cv2.img_hash.blockMeanHash,
mode=cv2.img_hash.BLOCK_MEAN_HASH_MODE_1), 121],
'phash': [cv2.img_hash.pHash, 8],
'marr_hildreth_hash': [cv2.img_hash.marrHildrethHash, 72],
'radial_variance_hash': [cv2.img_hash.radialVarianceHash, 40]}
runs = []
for i in range(n_runs):
np.random.seed(i)
new_targets = np.random.choice(targets, size=len(targets), replace=False)
# add some "randomness" by reversing images of each group
if i % 2 == 0:
df.sort_values(['creation_date'], inplace=True)
else:
df.sort_values(['creation_date'], ascending=False, inplace=True)
df['target'] = true_targets.map(dict(zip(targets, new_targets)))
df.sort_values(['target'], inplace=True)
df['hash_value'] = compare_hashes_adv(df['gray_images'].tolist(),
return_hash_only=True)
df['hash_value_adv'] = compare_hashes_adv(df['gray_images'].tolist(),
)
df['correl_corr'] = calc_correlations(df['hsv_images'].tolist(),
'correl')
df['bhattacharyya_corr'] = calc_correlations(df['hsv_images'].tolist(),
'bhattacharyya')
#TODO: method call with function call
for k, (func, dim) in methods.items():
df[k+'_value'] = df['rgb_images'].apply(lambda x: func(x))
df[k+'_value_tmp'] = np.append(df[k+'_value'].values[1:],
df[k+'_value'].values[:1])
df[k+'_value_cmp'] = df[[k+'_value', k+'_value_tmp']].apply(
lambda x: cv2.norm(x[0][0], x[1][0], cv2.NORM_HAMMING), axis=1)
df[k+'_value_cmp'] = 1 - df[k+'_value_cmp'] / (8 * dim)
add_cols = [c for c in df.columns if c.endswith('_cmp')]
runs.append(df[['target', 'creation_date', 'hash_value',
'hash_value_adv','correl_corr', 'bhattacharyya_corr'] + add_cols])
return runs
def return_dist(data, _type=None, target_col=None, comp_col=None):
"""
Function that calculates the comparison values for (first) similar & non-similar images
Parameters:
------------------------------
data: (list of pandas DataFrame), each dataframe is expected to contain
at least the following columns:
target, hash_value
_type: (str), choose between 'similar' and 'nonsimilar'
target_col: (str), column name of the target column
comp_col: (str), column name of the column used for comparison
Returns:
------------------------------
list of pandas DataFrames
"""
if _type == 'similar':
similar = []
for cur_res in data:
#TODO: calculate mean of element [0,n-1] if group size > 2
rel_rows = (cur_res.groupby([target_col])[comp_col].agg('count') > 1).values
similar.append(cur_res.groupby([target_col])[comp_col].first().iloc[rel_rows])
similar = np.sort(np.array(similar).reshape(-1))
return similar
elif _type == 'nonsimilar':
nonsimilar = np.sort(np.array([cur_res.groupby([target_col])[comp_col].last().values
for cur_res in data]).reshape(-1))
return nonsimilar
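# Hedged usage sketch (illustrative only): given the list of bootstrap runs,
# split one comparison column (here 'hash_value', an assumption) into the
# values of the first image per group (similar) and of the last image per
# group (non-similar).
def _demo_split_distributions(runs, comp_col='hash_value'):
    """Illustrative sketch: extract similar / non-similar comparison values."""
    similar = return_dist(runs, _type='similar',
                          target_col='target', comp_col=comp_col)
    nonsimilar = return_dist(runs, _type='nonsimilar',
                             target_col='target', comp_col=comp_col)
    return similar, nonsimilar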
def plot_distributions(similar, nonsimilar, bins=10, labels=('x', 'y'), title=''):
"""
Function that plots the histogram + kernel density estimates
of the comparison values for the (first) similar & non-similar distributions.
Parameters:
------------------------------
similar: (array), containing the hash values found
for similar groups
nonsimilar: (array), containing the hash values found
for non-similar groups
bins: (int), number of bins in histogram
labels: (tuple), x- and y-label for the plot
title: (str), plot title
Returns:
------------------------------
None
"""
full_vals = np.append(similar, nonsimilar)
xmin, xmax = min(full_vals), max(full_vals)
margin = (xmax - xmin)*0.1
xrange = np.arange(xmin - margin*2, xmax + margin*2, 0.01)
plt.figure(figsize=(15, 5))
kde_sim = sts.gaussian_kde(similar)
plt.hist(similar, bins=bins, rwidth=0.9, density=True,
label='first', color='gold', alpha=0.7);
plt.plot(xrange, kde_sim(xrange), lw=2, ls='-',
color='#6666ff', label='similar-KDE')
kde_nonsim = sts.gaussian_kde(nonsimilar)
plt.hist(nonsimilar, bins=bins, rwidth=0.9, density=True,
label='last', color='gray', alpha=0.7);
plt.plot(xrange, kde_nonsim(xrange), lw=2, ls='-',
color='#ff6666', label='last-KDE')
plt.xlim([xmin - margin*2, xmax + margin*2])
plt.xlabel(labels[0])
plt.ylabel(labels[1])
plt.title(title)
plt.legend()
plt.grid()
plt.show()
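# Hedged usage sketch with synthetic numbers (illustrative only): two
# overlapping normal samples stand in for the similar / non-similar
# comparison values; real inputs would come from return_dist above.
def _demo_plot_distributions(seed=0):
    """Illustrative sketch: histogram + KDE plot for two synthetic samples."""
    rng = np.random.RandomState(seed)
    similar = rng.normal(loc=0.9, scale=0.05, size=200)
    nonsimilar = rng.normal(loc=0.6, scale=0.10, size=200)
    plot_distributions(similar, nonsimilar, bins=15,
                       labels=('hash similarity', 'density'),
                       title='Synthetic example')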
def performance_report(similar, nonsimilar, limit=None, add_std=False):
"""
Prints a performance report.
Parameters:
------------------------------
similar: (array), containing the hash values found
for similar groups
nonsimilar: (array), containing the hash values found
for non-similar groups
limit: (float), threshold value; if None, the mean of the similar values is used
add_std: (bool), if True, standard deviation is added to the
mean threshold, works only if limit is None
Returns:
------------------------------
tuple (precision_score, recall_score, f1_score)
"""
def precision_score(tp, fp, eps=1e-10):
return (tp + eps) / (tp + fp + eps)
def recall_score(tp, fn, eps=1e-10):
return (tp + eps) / (tp + fn + eps)
def f1_score(tp, fp, fn, eps=1e-10):
return 2 / (1/precision_score(tp, fp) + 1/recall_score(tp, fn))
if limit is None:
limit = similar.mean()
if add_std is True:
limit += similar.std()
tp = sum([1 for c in similar if c >= limit])
fn = len(similar) - tp
tn = sum([1 for c in nonsimilar if c < limit])
fp = len(nonsimilar) - tn
p_score, r_score = precision_score(tp, fp), recall_score(tp, fn)
f_score = f1_score(tp, fp, fn)
print('Performance report\n'+'-'*50)
print('True positive: {} -- False negative: {}'.format(tp, fn))
print('True negative: {} -- False positive: {}'.format(tn, fp))
print('\nPrecision score: {:.4f}'.format(p_score))
print('Recall score: {:.4f}'.format(r_score))
print('F1 score: {:.4f}'.format(f_score))
return (p_score, r_score, f_score)
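# Hedged usage sketch with synthetic numbers (illustrative only): higher values
# are treated as "more similar", so most similar values should sit above the
# chosen limit; the limit of 0.8 is an arbitrary assumption.
def _demo_performance_report(seed=0):
    """Illustrative sketch: precision/recall/F1 for two synthetic samples."""
    rng = np.random.RandomState(seed)
    similar = rng.normal(loc=0.9, scale=0.05, size=200)
    nonsimilar = rng.normal(loc=0.6, scale=0.10, size=200)
    return performance_report(similar, nonsimilar, limit=0.8)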
def plot_summary(df):
"""
Plots horizontal bar charts summarizing the ranking methods used.
Parameters:
------------------------------
df: (pandas DataFrame), expecting the index to contain the method names and
the columns:
- 'groups_found' (i.e., the number of image groups identified)
- 'reduction' (i.e., the standardized reduction)
- 'groups_true' (i.e., the true number of image groups)
Returns:
------------------------------
None.
"""
fig, axs = plt.subplots(1,2, sharey=True, figsize=(15, 10),
tight_layout=True)
methods = [m.replace('_rank', '').capitalize() for m in df.index]
n_img = df['groups_true'].values[0]
axs[0].barh(methods, df['groups_found'], alpha=0.6)
axs[0].plot([n_img, n_img], [-1, len(df)+1], 'r')
axs[0].set_xlim([0, df['groups_found'].max()*1.05])
axs[1].set_ylim([-0.5, len(df)])
axs[0].grid(which='major', axis='x', alpha=0.8)
axs[0].set_xlabel('Images identified')
axs[0].set_title('Images identified by different ranking methods\n')
axs[1].barh(methods, df['reduction'], alpha=0.6)
axs[1].plot([1.0, 1.0], [-1, len(df)+1], 'r')
axs[1].set_xlim([0, df['reduction'].max()*1.05])
axs[1].set_ylim([-0.5, len(df)])
axs[1].grid(which='major', axis='x', alpha=0.8)
axs[1].set_xlabel('Standardized reduction')
axs[1].set_title('Standardized reduction by different ranking methods\n')
plt.show()
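# Hedged usage sketch (illustrative only): plot_summary expects the method
# names in the index and the columns 'groups_found', 'reduction' and
# 'groups_true'; the numbers below are invented and pandas is assumed to be
# imported as pd in this module.
def _demo_plot_summary():
    """Illustrative sketch: summary plot for three fictitious ranking methods."""
    df_summary = pd.DataFrame(
        {'groups_found': [40, 55, 60],
         'reduction': [0.6, 0.8, 0.9],
         'groups_true': [60, 60, 60]},
        index=['hash_rank', 'correl_rank', 'phash_rank'])
    plot_summary(df_summary)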
def hash_image(image, method):
"""
Hashes the given image(s) with the selected OpenCV img_hash method.
The second value in each `methods` entry is the dimension used to
normalize the hash comparison value.
Parameters:
------------------------------
image: image array (or collection of image arrays) to be hashed
method: (str), one of 'avg_hash', 'block_hash', 'phash',
'marr_hildreth_hash', 'radial_variance_hash'
"""
methods = {'avg_hash': [cv2.img_hash.averageHash, 8],
'block_hash': [partial(cv2.img_hash.blockMeanHash,
mode=cv2.img_hash.BLOCK_MEAN_HASH_MODE_1), 121],
'phash': [cv2.img_hash.pHash, 8],
'marr_hildreth_hash': [cv2.img_hash.marrHildrethHash, 72],
'radial_variance_hash': [cv2.img_hash.radialVarianceHash, 40]}
func, dim = methods[method]
image_values = | pd.Series(image) | pandas.Series |
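# Hedged sketch (illustrative only) mirroring the comparison logic used in
# bootstrap_data above: hash two caller-supplied RGB images with one of the
# OpenCV img_hash methods and turn the Hamming distance into a similarity in
# [0, 1] via 1 - distance / (8 * dim).
def _demo_hash_similarity(image_a, image_b, method='phash'):
    """Illustrative sketch: normalized hash similarity between two images."""
    methods = {'avg_hash': [cv2.img_hash.averageHash, 8],
               'phash': [cv2.img_hash.pHash, 8]}
    func, dim = methods[method]
    hash_a, hash_b = func(image_a), func(image_b)
    dist = cv2.norm(hash_a, hash_b, cv2.NORM_HAMMING)
    return 1 - dist / (8 * dim)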
import datetime
import logging
from pathlib import Path
import numpy as np
import pandas as pd
import plotly.graph_objs as go
from numpy.linalg import inv
from scipy.linalg import sqrtm
from sklearn import covariance
from sklearn.base import BaseEstimator
from sklearn.covariance import EmpiricalCovariance
from sklearn.decomposition import PCA
from statsmodels.api import OLS
from statsmodels.tools import add_constant
from .. import tools
# expenses + tax dividend
EXPENSES = {
"CASH": 0.0,
"TMF": 0.0108,
"DPST": 0.0104,
"ASHR": 0.0065,
"TQQQ": 0.0095,
"UGLD": 0.0135,
"ERX": 0.01,
"RING": 0.0039,
"LABU": 0.0109,
"YINN": 0.0152,
"SOXL": 0.0097,
"RETL": 0.0105,
"TYD": 0.0097,
"UDOW": 0.0095,
"GBTC": 0.02,
"FAS": 0.0096,
"MCHI": 0.0064,
"CQQQ": 0.0070,
"CHIX": 0.0065,
"UBT": 0.0095,
"FXI": 0.0074,
"DRN": 0.0109,
"O": 0 + 0.045 * 0.15,
"DSUM": 0.0045 + 0.035 * 0.15,
"SPY": 0.0009,
"TLT": 0.0015,
"ZIV": 0.0135,
"GLD": 0.004,
"BABA": 0.0,
"BIDU": 0.0,
"IEF": 0.0015,
"KWEB": 0.007,
"JPNL": 0.0121,
"EDC": 0.0148,
"EEMV.L": 0.0025,
"IWVL.L": 0.003,
"MVEU.L": 0.0025,
"USMV": 0.0015,
"ACWV": 0.002,
"EFAV": 0.002,
"KRE": 0.0035,
"EEM": 0.0068,
"VNQ": 0.0012 + 0.0309 * 0.15,
"EWJ": 0.0049,
"HYG": 0.0049,
"VLUE": 0.0004,
"SPMV": 0.001,
"IDWP.L": 0.0069,
"ZN": 0.0,
"RFR": 0.0,
}
class CovarianceEstimator(object):
"""Estimator which accepts sklearn objects.
:param w: regularization from paper `Enhanced Portfolio Optimization`, value 0 means no regularization,
value 1 means to ignore covariances
:param frequency: how often should we recalculate covariance matrix, used to speed up MPT prototyping
"""
def __init__(self, cov_est, window, standardize=True, w=0.0, frequency=1):
self.cov_est = cov_est
self.window = window
self.standardize = standardize
self.w = w
self.frequency = frequency
self._last_cov = None
self._last_n = 0
def fit(self, X):
# assert X.mean().mean() < 1.
# reuse covariance matrix
if (
self.frequency > 1
and self._last_cov is not None
and len(X) - self._last_n < self.frequency
and list(X.columns) == list(self._last_cov.columns)
):
return self._last_cov
# only use last window
if self.window:
X = X.iloc[-self.window :]
# remove zero-variance elements
zero_variance = X.std() == 0
Y = X.iloc[:, ~zero_variance.values]
# most estimators assume isotropic covariance matrix, so standardize before feeding them
std = Y.std()
Y = Y / std
# can estimator handle NaN values?
if getattr(self.cov_est, "allow_nan", False):
self.cov_est.fit(Y)
cov = pd.DataFrame(
self.cov_est.covariance_, index=Y.columns, columns=Y.columns
)
else:
# compute full covariance for non-NaN columns
Yn = Y.dropna(axis=1, how="any")
full_cov = self.cov_est.fit(Yn).covariance_
full_cov = pd.DataFrame(full_cov, index=Yn.columns, columns=Yn.columns)
full_cov = full_cov.reindex(Y.columns).reindex(columns=Y.columns)
# put back NaN columns one by one, compute covariance using
# available history
cols = list(Yn.columns)
for col in set(Y.columns) - set(Yn.columns):
cols.append(col)
c = Y[cols].dropna().cov().loc[col]
full_cov.loc[col, cols] = c
full_cov.loc[cols, col] = c
cov = full_cov.loc[Y.columns, Y.columns]
# standardize back
cov = np.outer(std, std) * cov
# put back zero covariance
cov = cov.reindex(X.columns).reindex(columns=X.columns).fillna(0.0)
# turn on?
# assert np.linalg.eig(cov)[0].min() > 0
# annualize covariance
cov *= tools.freq(X.index)
# regularize
cov = (1 - self.w) * cov + self.w * np.diag(np.diag(cov))
# CASH should have zero covariance
if "CASH" in X.columns:
cov.loc["CASH", :] = 0
cov.loc[:, "CASH"] = 0
self._last_cov = cov
self._last_n = len(X)
return cov
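# Hedged usage sketch (illustrative only): wrap an sklearn covariance estimator
# and fit it on a small frame of synthetic daily returns. Ticker names and
# numbers are invented, and the call relies on the package-level tools.freq
# helper to annualize the estimate.
def _demo_covariance_estimator(seed=0):
    """Illustrative sketch: annualized, regularized covariance of fake returns."""
    rng = np.random.RandomState(seed)
    X = pd.DataFrame(
        rng.normal(0.0, 0.01, size=(250, 3)),
        index=pd.date_range('2020-01-01', periods=250, freq='B'),
        columns=['SPY', 'TLT', 'GLD'])
    est = CovarianceEstimator(EmpiricalCovariance(), window=200, w=0.1)
    return est.fit(X)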
class SharpeEstimator(object):
def __init__(
self,
global_sharpe=0.4,
override_sharpe=None,
override_mean=None,
capm=None,
rfr=0.0,
verbose=False,
cov_estimator=None,
tax_adjustment=None,
):
"""
:param rfr: risk-free rate
"""
self.override_sharpe = override_sharpe or {}
self.override_mean = override_mean or {}
self.capm = capm or {}
self.global_sharpe = global_sharpe
self.rfr = rfr
self.verbose = verbose
self.cov_estimator = cov_estimator
self.tax_adjustment = tax_adjustment
def fit(self, X, sigma):
"""
formula for mean is:
sh * vol + rf - expenses
"""
# estimate sigma again if cov_estimator is present
if self.cov_estimator is not None:
sigma = self.cov_estimator.fit(X - 1)
est_sh = | pd.Series(self.global_sharpe, index=sigma.index) | pandas.Series |
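# Hedged sketch (illustrative only) of the mean formula quoted in the fit()
# docstring above (mu = sharpe * vol + rfr - expenses); the actual fit()
# implementation continues beyond this excerpt, so this is not the real method.
def _demo_expected_returns(sigma, global_sharpe=0.4, rfr=0.0):
    """Illustrative sketch: expected returns from an annualized covariance."""
    vol = pd.Series(np.sqrt(np.diag(sigma)), index=sigma.index)
    expenses = pd.Series([EXPENSES.get(a, 0.0) for a in sigma.index],
                         index=sigma.index)
    return global_sharpe * vol + rfr - expenses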
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 14 10:59:05 2021
@author: franc
"""
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pathlib import Path
import json
from collections import Counter, OrderedDict
import math
import torchtext
from torchtext.data import get_tokenizer
from googletrans import Translator
# from deep_translator import GoogleTranslator
# pip install googletrans==4.0.0rc1
import pickle
# pip install pickle-mixin
import nltk
from nltk.stem import WordNetLemmatizer
from nltk.corpus import wordnet as wn
# python -m spacy download es_core_news_sm
import spacy
import fasttext.util
import contractions
import re # regular expressions library
import string # string and character utilities library
import itertools
import sys
sys.path.append("/tmp/TEST")
from treetagger import TreeTagger
import pathlib
from scipy.spatial import distance
from scipy.stats import kurtosis
from scipy.stats import skew
class NLPClass:
def __init__(self):
self.numero = 1
nltk.download('wordnet')
def translations_dictionary(self, df_translate=None, path=""):
'''
It appends Spanish-English animal name pairs to a dictionary so that
the English animal names appear in WordNet synsets.
Parameters
----------
df_translate : pandas.dataframe, optional.
If it's not None, the rows are appended. Otherwise it's
initialized and then the rows are appended.
The default is None.
path : string, optional
The path where the dictionary is saved as a pickle file, unless the
path is empty.
The default is "".
Returns
-------
df_translate : pandas.dataframe.
Pandas.dataframe with the new rows appended.
'''
df_auxiliar = pd.DataFrame(columns=['spanish','english'])
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["yaguareté"], 'english': ["jaguar"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["llama"], 'english': ["llama"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["picaflor"], 'english': ["hummingbird"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["chita"], 'english': ["cheetah"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["torcaza"], 'english': ["dove"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["yacaré"], 'english': ["alligator"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["corvina"], 'english': ["croaker"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["vizcacha"], 'english': ["viscacha"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["orca"], 'english': ["killer_whale"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["barata"], 'english': ["german_cockroach"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["coipo"], 'english': ["coypu"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["cuncuna"], 'english': ["caterpillar"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["carpincho"], 'english': ["capybara"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["jote"], 'english': ["buzzard"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["zorzal"], 'english': ["fieldfare"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["guanaco"], 'english': ["guanaco"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["pejerrey"], 'english': ["silverside"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["mandril"], 'english': ["mandrill"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["peludo"], 'english': ["armadillo"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["chingue"], 'english': ["skunk"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["guaren"], 'english': ["brown_rat"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["cata"], 'english': ["budgerigar"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append( | pd.DataFrame({'spanish': ["bonito"], 'english': ["atlantic_bonito"]}) | pandas.DataFrame |