| prompt (string, lengths 19 to 1.03M) | completion (string, lengths 4 to 2.12k) | api (string, lengths 8 to 90) |
|---|---|---|
"""
Tests for Series cumulative operations.
See also
--------
tests.frame.test_cumulative
"""
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
methods = {
"cumsum": np.cumsum,
"cumprod": np.cumprod,
"cummin": np.minimum.accumulate,
"cummax": np.maximum.accumulate,
}
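# Editorial sketch (not part of the original test suite): the pandas/NumPy
# equivalence that the `methods` mapping above encodes, on a tiny hand-made Series.
_example = pd.Series([3, 1, 4, 1, 5])
assert (_example.cummax().to_numpy() == np.maximum.accumulate(_example.to_numpy())).all()
assert (_example.cumsum().to_numpy() == np.cumsum(_example.to_numpy())).all()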
def _check_accum_op(name, series, check_dtype=True):
func = getattr(np, name)
tm.assert_numpy_array_equal(
func(series).values, func(np.array(series)), check_dtype=check_dtype
)
# with missing values
ts = series.copy()
ts[::2] = np.NaN
result = func(ts)[1::2]
expected = func(np.array(ts.dropna()))
tm.assert_numpy_array_equal(result.values, expected, check_dtype=False)
class TestSeriesCumulativeOps:
def test_cumsum(self, datetime_series):
_check_accum_op("cumsum", datetime_series)
def test_cumprod(self, datetime_series):
_check_accum_op("cumprod", datetime_series)
@pytest.mark.parametrize("method", ["cummin", "cummax"])
def test_cummin_cummax(self, datetime_series, method):
ufunc = methods[method]
result = getattr(datetime_series, method)().values
expected = ufunc(np.array(datetime_series))
tm.assert_numpy_array_equal(result, expected)
ts = datetime_series.copy()
ts[::2] = np.NaN
result = getattr(ts, method)()[1::2]
expected = ufunc(ts.dropna())
result.index = result.index._with_freq(None)
| tm.assert_series_equal(result, expected) | pandas._testing.assert_series_equal |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import statsmodels
from matplotlib import pyplot
from scipy import stats
import statsmodels.api as sm
import warnings
from itertools import product
import datetime as dt
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.stattools import kpss
from pandas import DataFrame
from pandas import concat
from pandas import Series
from math import sqrt
from sklearn.metrics import mean_squared_error
# convert series to supervised learning
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1]
df = DataFrame(data)
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += [('var%d(t-%d)' % (j + 1, i)) for j in range(n_vars)]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j + 1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j + 1, i)) for j in range(n_vars)]
# put it all together
agg = concat(cols, axis=1)
agg.columns = names
# drop rows with NaN values
if dropnan:
agg.dropna(inplace=True)
return agg
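# Editorial usage sketch (not in the original script): with two input columns,
# one lag (n_in=1) and one lead block (n_out=1), the output gains columns
# var1(t-1), var2(t-1), var1(t), var2(t), and the first row is dropped because
# the lagged values of the first observation are NaN.
_demo = DataFrame({'a': [1, 2, 3, 4], 'b': [10, 20, 30, 40]})
print(series_to_supervised(_demo, n_in=1, n_out=1))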
data = pd.read_csv('Data/All_Merged.csv') # , parse_dates=[0], date_parser=dateparse
data.isna().sum()
# Inserting 0 for NA
data.fillna(0, inplace=True)
# plt.figure(figsize=[10,4])
# plt.title('BTC Price (USD) Daily')
# plt.plot(data.price, '-', label='Daily')
# Monthly
data['date'] = pd.to_datetime(data['date'])
data['date'] = data['date'].dt.tz_localize(None)
data = data.groupby([pd.Grouper(key='date', freq='M')]).first().reset_index()
data = data.set_index('date')
data['price'].fillna(method='ffill', inplace=True)
# Decomposition - only for price though!
# decomposition = sm.tsa.seasonal_decompose(data.price)
#
# trend = decomposition.trend
# seasonal = decomposition.seasonal
# residual = decomposition.resid
#
# fig = plt.figure(figsize=(10,8))
#
# plt.subplot(411)
# plt.plot(data.price, label='Original')
# plt.legend(loc='best')
# plt.subplot(412)
# plt.plot(trend, label='Trend')
# plt.legend(loc='best')
# plt.subplot(413)
# plt.plot(seasonal,label='Seasonality')
# plt.legend(loc='best')
# plt.subplot(414)
# plt.plot(residual, label='Residuals')
# plt.legend(loc='best')
#
# fig.suptitle('Decomposition of Prices Data')
# plt.show()
# Setting the data structure
reframed = series_to_supervised(data, 1, 1)
# Also removing the lagged price, as this will be created in the ARIMA
reframed.drop(reframed.columns[[0,8, 9, 10, 11, 12, 13]], axis=1, inplace=True)
print(reframed.head())
# split data
split_date = '2018-06-25'
reframed_train = reframed.loc[reframed.index <= split_date].copy()
reframed_test = reframed.loc[reframed.index > split_date].copy()
# Trying ARIMA on the original data
# This is a seasonal ARIMA (SARIMA), so it should give an extra result on top of a regular ARIMA
# Help with the commentary can be found here: https://machinelearningmastery.com/sarima-for-time-series-forecasting-in-python/
# It fits fine if you do not split into train and test..
# Initial approximation of parameters
Qs = range(0, 2)
qs = range(0, 3)
Ps = range(0, 3)
ps = range(0, 3)
D=1
d=1
parameters = product(ps, qs, Ps, Qs)
parameters_list = list(parameters)
len(parameters_list)
x_train = reframed_train.iloc[:,:-1].values
y_train = reframed_train.iloc[:,-1]
x_test = reframed_test.iloc[:,:-1].values
y_test = reframed_test.iloc[:,-1]
# Model Selection
results = []
best_aic = float("inf")
warnings.filterwarnings('ignore')
for param in parameters_list:
try:
model=sm.tsa.statespace.SARIMAX(endog=y_train, exog=x_train, order=(param[0], d, param[1]),
seasonal_order=(param[2], D, param[3], 12),enforce_stationarity=True,
enforce_invertibility=True).fit(disp=-1)
except ValueError:
print('wrong parameters:', param)
continue
aic = model.aic
if aic < best_aic:
best_model = model
best_aic = aic
best_param = param
results.append([param, model.aic])
# Best Models
result_table = pd.DataFrame(results)
result_table.columns = ['parameters', 'aic']
print(result_table.sort_values(by = 'aic', ascending=True).head())
print(best_model.summary())
# Residual plot of the best model
fig = plt.figure(figsize=(10,4))
best_model.resid.plot()
fig.suptitle('Residual Plot of the Best Model')
print("Dickey–Fuller test:: p=%f" % sm.tsa.stattools.adfuller(best_model.resid)[1])
# Dickey–Fuller test:: p=0.xxx -> Residuals are stationary
df_month2 = data[['price']]
future = pd.DataFrame()
df_month2 = pd.concat([df_month2, future])
df_month2['forecast'] = best_model.predict(start = len(x_train), end = len(x_train)+len(x_test)-1, exog=x_test)
plt.figure(figsize=(8,4))
df_month2.price.plot()
df_month2.forecast.plot(color='r', ls='--', label='Predicted Price')
plt.legend()
plt.title('Bitcoin Prices (USD) Predicted vs Actuals, by months')
plt.ylabel('mean USD')
plt.show()
# Daily version
df = pd.read_csv('Data/All_Merged.csv')
df.isna().sum()
# Inserting 0 for NA
df.fillna(0, inplace=True)
# Date type
df['date'] = pd.to_datetime(df['date'])
df['date'] = df['date'].dt.tz_localize(None)
df = df.groupby([pd.Grouper(key='date', freq='D')]).first().reset_index()
df = df.set_index('date')
df['price'].fillna(method='ffill', inplace=True)
# Setting the data structure
daily_re = series_to_supervised(df, 1, 1)
price = daily_re.iloc[:,0]
da_price = daily_re.iloc[:,0]
daily_re.drop(daily_re.columns[[0,8, 9, 10, 11, 12, 13]], axis=1, inplace=True)
y = daily_re.iloc[:,-1]
print(daily_re.head())
# split data
split_date = '2018-07-11'
daily_re_train = daily_re.loc[daily_re.index <= split_date].copy()
daily_re_test = daily_re.loc[daily_re.index > split_date].copy()
da_price = da_price.loc[da_price.index > split_date].copy()
da_price = da_price.values
# As in boosting - but without validation, only with a rolling window
pred_day = 2901-16 # Predict for this day, for the next H-1 days. Note indexing of days starts from 0.
H = 30 # Forecast horizon, in days. Note there are about 252 trading days in a year
train_size = int(365 * 0.75) # Use about 75% of a year of data as train set. Note there are about 252 trading days in a year
val_size = int(365 * 0.25)
train_val_size = train_size + val_size # Size of train+validation set
print("No. of days in train+validation set = " + str(train_val_size))
qs = range(0, 3)
ps = range(1, 3)
d = 1
parameters = product(ps, qs)
parameters_list = list(parameters)
len(parameters_list)
# ARIMA again, but not seasonal, and with training and test data, at the daily level
# Initial approximation of parameters
pred = pd.DataFrame()
while daily_re.index[pred_day] < daily_re.index[len(daily_re) - 1]:
x_da_train = daily_re.iloc[pred_day - train_val_size:pred_day,:-1].values
y_da_train = daily_re.iloc[pred_day - train_val_size:pred_day,-1]
x_da_test = daily_re.iloc[pred_day:pred_day + H,:-1].values
y_da_test = daily_re.iloc[pred_day:pred_day + H,-1]
# Model Selection
results = []
best_bic = float("inf")
warnings.filterwarnings('ignore')
for param in parameters_list:
try:
model=sm.tsa.ARIMA(endog=y_da_train, exog=x_da_train, order=(param[0], d, param[1])).fit(disp=-1)
except ValueError:
print('wrong parameters:', param)
continue
bic = model.bic
if bic < best_bic:
best_model = model
best_bic = bic
best_param = param
results.append([param, model.aic])
# Best Models
# result_table = pd.DataFrame(results)
# result_table.columns = ['parameters', 'bic']
# print(result_table.sort_values(by = 'bic', ascending=True).head())
# print(best_model.summary())
append = best_model.predict(start = len(x_da_train), end = len(x_da_train)+len(x_da_test)-1, exog=x_da_test).T
pred = pd.concat([pred, append], ignore_index=True)
pred_day = pred_day + H
pred_day = 2901-16 # Reset
price2 = price.iloc[pred_day:]
pred['prev_price'] = price2.values
pred.index = price2.index
pred['pred'] = pred.sum(axis=1)
# price2 = price2.values
# pred = pred.values
# Residual plot of the best model
# fig = plt.figure(figsize=(10,4))
# best_model.resid.plot()
# fig.suptitle('Residual Plot of the Best Model')
# print("Dickey–Fuller test:: p=%f" % sm.tsa.stattools.adfuller(best_model.resid)[1])
# Dickey–Fuller test:: p=0.001213 -> Residuals are stationary
df_month2 = df[['price']]
future = pd.DataFrame()
df_month2 = pd.concat([df_month2, future])
yhat = (pred.T + price2).T.astype('float32')
df_month2['forecast'] = pred['pred']
plt.figure(figsize=(8,4))
df_month2.price.plot()
df_month2.forecast.plot(color='r', ls='--', label='Predicted Price')
plt.legend()
plt.title('Bitcoin Prices (USD) Predicted vs Actuals, daily')
plt.ylabel('mean USD')
plt.show()
# RMSE
y = y.iloc[pred_day:]
yhat = pred['pred']
rmse = sqrt(mean_squared_error(y, yhat))
print('Test RMSE: %.3f' % rmse)
# Only the forecast part
da_fore = daily_re_test[['var1(t)']]
future = pd.DataFrame()
da_fore = pd.concat([da_fore, future])
da_fore['forecast'] = pred['pred']
plt.figure(figsize=(8,4))
da_fore['var1(t)'].plot()
da_fore.forecast.plot(color='r', ls='--', label='Predicted Price')
plt.legend()
plt.title('Bitcoin Prices (USD) Predicted vs Actuals, daily')
plt.ylabel('mean USD')
plt.show()
# Export predictions
pred['pred'].to_csv(r'Data\ARIMA_Pred.csv')
#### Appendix ####
def descriptive_statistics(df, series):
stats = df[series].describe()
print('\nDescriptive Statistics for', '\'' + series + '\'', '\n\n', stats)
def get_graphics(df, series, xlabel, ylabel, title, grid=True):
plt.plot(pd.to_datetime(df.index), df[series])
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title)
plt.grid(grid)
return plt.show()
# stationary tests
# unit root = statistical properties of series are not constant with time.
#
# In order to be stationary, the statistical properties of a series have to be constant over time. So if a series has a unit root, it is not stationary
#
# strict stationary = mean, variance, covariance are not function of time
# trend stationary = no unit root, but has a trend. if you remove the trend, it would be strict stationary
# difference stationary = series can be made strict stationary by differencing
# ADF Augmented Dickey Fuller Test (unit root test)
# null hypothesis = series has a unit root (a = 1)
# alt hypothesis = series has no unit root
#
# accept null = t-score is greater than critical value (there is a unit root)
# reject null = t-score is less than critical value (there is no unit root)
#
# accept null = bad (not stationary)
# reject null = good (stationary)
#
# adf can be interpreted as a difference stationary test
def adf_test(df, series):
results = adfuller(df[series])
output = pd.Series(results[0:4], index=['t-score', 'p-value', '# of lags used', '# of observations'])
for key, value in results[4].items():
output['critical value (%s)' % key] = value
# if t-score < critical value at 5%, the data is stationary
# if t-score > critical value at 5%, the data is NOT stationary
if output[0] < output[5]:
print('\nADF: The data', '\'' + series + '\'', 'is STATIONARY \n\n', output)
elif output[0] > output[5]:
print('\nADF: The data', '\'' + series + '\'', 'is NOT STATIONARY \n\n', output)
else:
print('\nADF: There is something wrong with', '\'' + series + '\'', '\n\n', output)
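# Editorial sketch (not in the original script): the ADF decision rule above applied
# to the monthly price level and to its first difference; assumes the monthly 'data'
# frame built near the top of this script, mirroring the d=1 differencing used in the SARIMA fit.
adf_test(data, 'price')
adf_test(data[['price']].diff().dropna(), 'price')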
# KPSS Kwiatkowski-Phillips-Schmidt-Shin Test (stationary test)
# null hypothesis = the series has a stationary trend
# alt hypothesis = the series has a unit root (series is not stationary)
#
# accept null = t-score is less than critical value (series is stationary)
# reject null = t-score is greater than the critical value (series is not stationary)
#
# accept null = good (stationary)
# reject null = bad (not stationary)
#
# kpss classifies a series as stationary on the absence of a unit root
# (both strict stationary and trend stationary will be classified as stationary)
def kpss_test(df, series):
results = kpss(df[series], regression='ct')
output = | pd.Series(results[0:3], index=['t-score', 'p-value', '# lags used']) | pandas.Series |
"""
Unit test suite for OLS and PanelOLS classes
"""
# pylint: disable-msg=W0212
from __future__ import division
from datetime import datetime
import unittest
import nose
import numpy as np
from pandas import date_range, bdate_range
from pandas.core.panel import Panel
from pandas import DataFrame, Index, Series, notnull, datetools
from pandas.stats.api import ols
from pandas.stats.ols import _filter_data
from pandas.stats.plm import NonPooledPanelOLS, PanelOLS
from pandas.util.testing import (assert_almost_equal, assert_series_equal,
assert_frame_equal)
import pandas.util.testing as tm
from common import BaseTest
_have_statsmodels = True
try:
import statsmodels.api as sm
except ImportError:
try:
import scikits.statsmodels.api as sm
except ImportError:
_have_statsmodels = False
def _check_repr(obj):
repr(obj)
str(obj)
def _compare_ols_results(model1, model2):
assert(type(model1) == type(model2))
if hasattr(model1, '_window_type'):
_compare_moving_ols(model1, model2)
else:
_compare_fullsample_ols(model1, model2)
def _compare_fullsample_ols(model1, model2):
assert_series_equal(model1.beta, model2.beta)
def _compare_moving_ols(model1, model2):
assert_frame_equal(model1.beta, model2.beta)
class TestOLS(BaseTest):
# TODO: Add tests for OLS y predict
# TODO: Right now we just check for consistency between full-sample and
# rolling/expanding results of the panel OLS. We should also cross-check
# with trusted implementations of panel OLS (e.g. R).
# TODO: Add tests for non pooled OLS.
@classmethod
def setUpClass(cls):
try:
import matplotlib as mpl
mpl.use('Agg', warn=False)
except ImportError:
pass
if not _have_statsmodels:
raise nose.SkipTest
def testOLSWithDatasets(self):
self.checkDataSet(sm.datasets.ccard.load(), skip_moving=True)
self.checkDataSet(sm.datasets.cpunish.load(), skip_moving=True)
self.checkDataSet(sm.datasets.longley.load(), skip_moving=True)
self.checkDataSet(sm.datasets.stackloss.load(), skip_moving=True)
self.checkDataSet(sm.datasets.copper.load())
self.checkDataSet(sm.datasets.scotland.load())
# degenerate case fails on some platforms
# self.checkDataSet(datasets.ccard.load(), 39, 49) # one col in X all 0s
def testWLS(self):
X = DataFrame(np.random.randn(30, 4), columns=['A', 'B', 'C', 'D'])
Y = Series(np.random.randn(30))
weights = X.std(1)
self._check_wls(X, Y, weights)
weights.ix[[5, 15]] = np.nan
Y[[2, 21]] = np.nan
self._check_wls(X, Y, weights)
def _check_wls(self, x, y, weights):
result = ols(y=y, x=x, weights=1/weights)
combined = x.copy()
combined['__y__'] = y
combined['__weights__'] = weights
combined = combined.dropna()
endog = combined.pop('__y__').values
aweights = combined.pop('__weights__').values
exog = sm.add_constant(combined.values, prepend=False)
sm_result = sm.WLS(endog, exog, weights=1/aweights).fit()
assert_almost_equal(sm_result.params, result._beta_raw)
assert_almost_equal(sm_result.resid, result._resid_raw)
self.checkMovingOLS('rolling', x, y, weights=weights)
self.checkMovingOLS('expanding', x, y, weights=weights)
def checkDataSet(self, dataset, start=None, end=None, skip_moving=False):
exog = dataset.exog[start : end]
endog = dataset.endog[start : end]
x = DataFrame(exog, index=np.arange(exog.shape[0]),
columns=np.arange(exog.shape[1]))
y = Series(endog, index=np.arange(len(endog)))
self.checkOLS(exog, endog, x, y)
if not skip_moving:
self.checkMovingOLS('rolling', x, y)
self.checkMovingOLS('rolling', x, y, nw_lags=0)
self.checkMovingOLS('expanding', x, y, nw_lags=0)
self.checkMovingOLS('rolling', x, y, nw_lags=1)
self.checkMovingOLS('expanding', x, y, nw_lags=1)
self.checkMovingOLS('expanding', x, y, nw_lags=1, nw_overlap=True)
def checkOLS(self, exog, endog, x, y):
reference = sm.OLS(endog, sm.add_constant(exog, prepend=False)).fit()
result = ols(y=y, x=x)
# check that sparse version is the same
sparse_result = ols(y=y.to_sparse(), x=x.to_sparse())
_compare_ols_results(result, sparse_result)
assert_almost_equal(reference.params, result._beta_raw)
assert_almost_equal(reference.df_model, result._df_model_raw)
assert_almost_equal(reference.df_resid, result._df_resid_raw)
assert_almost_equal(reference.fvalue, result._f_stat_raw[0])
assert_almost_equal(reference.pvalues, result._p_value_raw)
assert_almost_equal(reference.rsquared, result._r2_raw)
assert_almost_equal(reference.rsquared_adj, result._r2_adj_raw)
assert_almost_equal(reference.resid, result._resid_raw)
assert_almost_equal(reference.bse, result._std_err_raw)
assert_almost_equal(reference.tvalues, result._t_stat_raw)
assert_almost_equal(reference.cov_params(), result._var_beta_raw)
assert_almost_equal(reference.fittedvalues, result._y_fitted_raw)
_check_non_raw_results(result)
def checkMovingOLS(self, window_type, x, y, weights=None, **kwds):
window = sm.tools.tools.rank(x.values) * 2
moving = ols(y=y, x=x, weights=weights, window_type=window_type,
window=window, **kwds)
# check that sparse version is the same
sparse_moving = ols(y=y.to_sparse(), x=x.to_sparse(),
weights=weights,
window_type=window_type,
window=window, **kwds)
_compare_ols_results(moving, sparse_moving)
index = moving._index
for n, i in enumerate(moving._valid_indices):
if window_type == 'rolling' and i >= window:
prior_date = index[i - window + 1]
else:
prior_date = index[0]
date = index[i]
x_iter = {}
for k, v in x.iteritems():
x_iter[k] = v.truncate(before=prior_date, after=date)
y_iter = y.truncate(before=prior_date, after=date)
static = ols(y=y_iter, x=x_iter, weights=weights, **kwds)
self.compare(static, moving, event_index=i,
result_index=n)
_check_non_raw_results(moving)
FIELDS = ['beta', 'df', 'df_model', 'df_resid', 'f_stat', 'p_value',
'r2', 'r2_adj', 'rmse', 'std_err', 't_stat',
'var_beta']
def compare(self, static, moving, event_index=None,
result_index=None):
index = moving._index
# Check resid if we have a time index specified
if event_index is not None:
ref = static._resid_raw[-1]
label = index[event_index]
res = moving.resid[label]
assert_almost_equal(ref, res)
ref = static._y_fitted_raw[-1]
res = moving.y_fitted[label]
assert_almost_equal(ref, res)
# Check y_fitted
for field in self.FIELDS:
attr = '_%s_raw' % field
ref = getattr(static, attr)
res = getattr(moving, attr)
if result_index is not None:
res = res[result_index]
assert_almost_equal(ref, res)
def test_ols_object_dtype(self):
df = DataFrame(np.random.randn(20, 2), dtype=object)
model = ols(y=df[0], x=df[1])
summary = repr(model)
class TestOLSMisc(unittest.TestCase):
'''
For test coverage with faux data
'''
@classmethod
def setupClass(cls):
if not _have_statsmodels:
raise nose.SkipTest
def test_f_test(self):
x = tm.makeTimeDataFrame()
y = x.pop('A')
model = ols(y=y, x=x)
hyp = '1*B+1*C+1*D=0'
result = model.f_test(hyp)
hyp = ['1*B=0',
'1*C=0',
'1*D=0']
result = model.f_test(hyp)
assert_almost_equal(result['f-stat'], model.f_stat['f-stat'])
self.assertRaises(Exception, model.f_test, '1*A=0')
def test_r2_no_intercept(self):
y = tm.makeTimeSeries()
x = tm.makeTimeDataFrame()
x_with = x.copy()
x_with['intercept'] = 1.
model1 = ols(y=y, x=x)
model2 = ols(y=y, x=x_with, intercept=False)
assert_series_equal(model1.beta, model2.beta)
# TODO: can we infer whether the intercept is there...
self.assert_(model1.r2 != model2.r2)
# rolling
model1 = ols(y=y, x=x, window=20)
model2 = ols(y=y, x=x_with, window=20, intercept=False)
assert_frame_equal(model1.beta, model2.beta)
self.assert_((model1.r2 != model2.r2).all())
def test_summary_many_terms(self):
x = DataFrame(np.random.randn(100, 20))
y = np.random.randn(100)
model = ols(y=y, x=x)
model.summary
def test_y_predict(self):
y = tm.makeTimeSeries()
x = tm.makeTimeDataFrame()
model1 = ols(y=y, x=x)
assert_series_equal(model1.y_predict, model1.y_fitted)
assert_almost_equal(model1._y_predict_raw, model1._y_fitted_raw)
def test_predict(self):
y = tm.makeTimeSeries()
x = tm.makeTimeDataFrame()
model1 = ols(y=y, x=x)
assert_series_equal(model1.predict(), model1.y_predict)
assert_series_equal(model1.predict(x=x), model1.y_predict)
assert_series_equal(model1.predict(beta=model1.beta), model1.y_predict)
exog = x.copy()
exog['intercept'] = 1.
rs = Series(np.dot(exog.values, model1.beta.values), x.index)
assert_series_equal(model1.y_predict, rs)
x2 = x.reindex(columns=x.columns[::-1])
assert_series_equal(model1.predict(x=x2), model1.y_predict)
x3 = x2 + 10
pred3 = model1.predict(x=x3)
x3['intercept'] = 1.
x3 = x3.reindex(columns = model1.beta.index)
expected = Series(np.dot(x3.values, model1.beta.values), x3.index)
assert_series_equal(expected, pred3)
beta = Series(0., model1.beta.index)
pred4 = model1.predict(beta=beta)
assert_series_equal(Series(0., pred4.index), pred4)
def test_predict_longer_exog(self):
exogenous = {"1998": "4760","1999": "5904","2000": "4504",
"2001": "9808","2002": "4241","2003": "4086",
"2004": "4687","2005": "7686","2006": "3740",
"2007": "3075","2008": "3753","2009": "4679",
"2010": "5468","2011": "7154","2012": "4292",
"2013": "4283","2014": "4595","2015": "9194",
"2016": "4221","2017": "4520"}
endogenous = {"1998": "691", "1999": "1580", "2000": "80",
"2001": "1450", "2002": "555", "2003": "956",
"2004": "877", "2005": "614", "2006": "468",
"2007": "191"}
endog = Series(endogenous)
exog = Series(exogenous)
model = ols(y=endog, x=exog)
pred = model.y_predict
self.assert_(pred.index.equals(exog.index))
def test_longpanel_series_combo(self):
wp = tm.makePanel()
lp = wp.to_frame()
y = lp.pop('ItemA')
model = | ols(y=y, x=lp, entity_effects=True, window=20) | pandas.stats.api.ols |
#!/usr/bin/env python
import os.path
import os
import sys
import pandas as pd
from schimpy.unit_conversions import *
if sys.version_info[0] < 3:
from pandas.compat import u
from builtins import open, file, str
else:
u = lambda x: x
import argparse
from vtools.data.timeseries import *
station_variables = ["elev", "air pressure", "wind_x", "wind_y",
"temp", "salt", "u", "v", "w"]
def staout_name(var):
try:
ndx = station_variables.index(var)
return "staout_{}".format(ndx+1)
except:
raise ValueError("Input variable is not standard station variable: {}".format(var))
def read_staout(fname,station_infile,reftime,ret_station_in = False,multi=False,elim_default=False,time_unit='s'):
"""Read a SCHISM staout_* file into a pandas DataFrame
Parameters
----------
fname : str
Path to input staout file, or a variable name in ["elev", "air pressure", "wind_x", "wind_y", "temp", "salt", "u", "v", "w"] whose
1-based index will be mapped to a name like staout_1 for elev
station_infile : str or DataFrame
Path to station.in file or DataFrame from read_station_in
reftime : Timestamp
Start of simulation, the time basis for the staout file's elapsed time
ret_station_in : bool
Return station_in DataFrame for use, which may speed reading of a second file
multi : bool
Should the returned data have a multi index for the column with location and sublocation. If False the two are collapsed
elim_default : bool
If the MultiIndex is collapsed, the "default" subloc is dropped from the label. E.g. ("CLC","default") becomes "CLC"
time_unit : string
Convertible to a pandas frequency string; this is the unit of the elapsed time in the file.
Returns
-------
Result : DataFrame
DataFrame with a time index and hierarchical columns (id,subloc) representing the staout data (collapsed as described above)
Examples
--------
>>> staout1,station_in = read_staout("staout_1","station.in",reftime=pd.Timestamp(2009,2,10),
ret_station_in = True,multi=False,elim_default=True)
>>> staout6 = read_staout("staout_6",station_in,reftime=pd.Timestamp(2009,2,10),multi=False,elim_default=True)
"""
if isinstance(station_infile,str):
station_in = read_station_in(station_infile)
else:
station_in = station_infile
station_index = station_in.index.copy()
if station_index.duplicated().any():
print(station_index[station_index.duplicated()])
raise ValueError("Duplicate id/subloc pair in station.in file {}".format(station_infile))
staout = pd.read_csv(fname,index_col=0,sep=r"\s+",header=None)
# todo: hardwire
staout.mask(staout<=-999.,inplace=True)
staout.columns = station_index
elapsed_datetime(staout,reftime=reftime,inplace=True,time_unit=time_unit)
staout.index = staout.index.round('s')
if not multi:
if elim_default:
staout.columns = [f'{loc}_{subloc}' if subloc != 'default' else f'{loc}' for loc,subloc in staout.columns]
else:
staout.columns = [f'{loc}_{subloc}' for loc,subloc in staout.columns]
f = | pd.infer_freq(staout.index) | pandas.infer_freq |
from isitfit.cost.ec2_analyze import BinCapUsed
import datetime as dt
import pytest
import pandas as pd
@pytest.fixture
def FakeMm():
class FakeMm:
StartTime = dt.datetime(2019,1,15)
EndTime = dt.datetime(2019,4,15)
return FakeMm
class TestBinCapUsedHandlePre:
def test_preNoBreak(self, FakeMm):
bcs = BinCapUsed()
ret = bcs.handle_pre({'mainManager': FakeMm()})
assert ret is not None
def test_3m(self, FakeMm):
bcs = BinCapUsed()
bcs.handle_pre({'mainManager': FakeMm()})
e = pd.DataFrame([
(dt.date(2019,1,31), 0, 0, 0, frozenset([]), dt.date(2019,1,31), dt.date(2019,1,1), ),
(dt.date(2019,2,28), 0, 0, 0, frozenset([]), dt.date(2019,2,28), dt.date(2019,2,1), ),
(dt.date(2019,3,31), 0, 0, 0, frozenset([]), dt.date(2019,3,31), dt.date(2019,3,1), ),
(dt.date(2019,4,30), 0, 0, 0, frozenset([]), dt.date(2019,4,30), dt.date(2019,4,1), ),
],
columns=['Timestamp', 'capacity_usd', 'used_usd', 'count_analyzed', 'regions_set', 'dt_start', 'dt_end']
)
for fx in ['Timestamp', 'dt_start', 'dt_end']: e[fx] = pd.to_datetime(e[fx])
e.set_index('Timestamp', inplace=True)
pd.testing.assert_frame_equal(e, bcs.df_bins)
class TestBinCapUsedPerEc2:
def test_1m(self, FakeMm):
# prepare input
df1 = pd.DataFrame([
(dt.date(2019,1,15), 10, 50),
(dt.date(2019,1,16), 12, 50),
(dt.date(2019,1,17), 12, 50),
],
columns=['Timestamp','capacity_usd','used_usd']
)
# calculate
bcs = BinCapUsed()
bcs.handle_pre({'mainManager': FakeMm()})
ctx = {'ec2_df': df1, 'ec2_dict': {'Region': 'us-west-2'}}
bcs.per_ec2(ctx)
bcs.per_ec2(ctx)
# expected
e = pd.DataFrame([
(dt.date(2019,1,31), 68, 300, 2, frozenset(['us-west-2']), dt.date(2019,1,15), dt.date(2019,1,17), ),
(dt.date(2019,2,28), 0, 0, 0, frozenset([]), dt.date(2019,2,28), dt.date(2019,2, 1), ),
(dt.date(2019,3,31), 0, 0, 0, frozenset([]), dt.date(2019,3,31), dt.date(2019,3, 1), ),
(dt.date(2019,4,30), 0, 0, 0, frozenset([]), dt.date(2019,4,30), dt.date(2019,4, 1), ),
],
columns=['Timestamp', 'capacity_usd', 'used_usd', 'count_analyzed', 'regions_set', 'dt_start', 'dt_end']
)
for fx in ['Timestamp', 'dt_start', 'dt_end']: e[fx] = | pd.to_datetime(e[fx]) | pandas.to_datetime |
import math
import logging
import re
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import pygest as ge
from pygest.convenience import bids_val, dict_from_bids, short_cmp, p_string
from pygest.algorithms import pct_similarity
from scipy.stats import ttest_ind
def mantel_correlogram(X, Y, by, bins=8, r_method='Pearson', fig_size=(8, 5), save_as=None,
title='Mantel Correlogram', xlabel='distance bins', ylabel='correlation',
logger=None):
""" Return a Mantel correlogram between vector_a and vector_b, over by
:param X: For our purposes, usually an expression vector. Can be any vector of floats
:param Y: For our purposes, usually a connectivity vector. Can be any vector of floats
:param by: For our purposes, usually a distance vector. Can be any vector of floats
:param bins: The number of bins can be specified
:param r_method: The correlation can be calculated as 'Pearson', 'Spearman', or 'Kendall'
:param tuple fig_size: size of desired plot, in inches (width, height)
:param save_as: A file name for saving out the correlogram
:param title: The title of the plot
:param xlabel: The x-axis (usually distance bins) label
:param ylabel: The y-axis (X vs Y correlations) label
:param logger: We can log notes to your logger or ours.
:return: matplotlib (Figure, Axes) objects containing the correlogram plot
"""
# Attach to the proper logger
if logger is None:
logger = logging.getLogger('pygest')
# Figure out the boundaries of the distance bins
dist_min = math.floor(min(by)) - (math.floor(min(by)) % bins)
dist_max = math.ceil(max(by)) + bins - (math.ceil(max(by)) % bins)
dist_x_axis = np.arange(dist_min, dist_max, dist_max / bins)
logger.info("({:0.2f} - {:0.2f}) -> ({} - {}), {}".format(
min(by), max(by), dist_min, dist_max, dist_x_axis
))
# Calculate correlations for each distance bin separately.
ds = []
prs = []
ns = []
rlos = []
rhis = []
for a in dist_x_axis:
# Create distance filters for this particular bin
by_filter = np.logical_and(by >= a, by < a + dist_max / bins)
logger.info(" {ts:,} of {all:,} distances are between {sm:0.1f} and {lg:0.1f}.".format(
ts=np.count_nonzero(by_filter),
all=len(by),
sm=a, lg=a + dist_max / bins
))
# Filter vectors for this bin.
_Y = Y[by_filter]
_X = X[by_filter]
ns.append(len(_X))
logger.info(" using {:,} (in distance range) of the {:,} original values.".format(
len(_Y),
len(Y)
))
# Calculate the correlations for this distance bin
_r = ge.corr(_X, _Y, method=r_method)
prs.append(_r)
ds.append(a + dist_max / bins / 2)
# Since r values are not going to be normally distributed (except maybe right at zero)
# we need to transform them to Fisher's normal z' and back.
z_prime = 0.50 * math.log((1 + _r) / (1 - _r))
z_se = 1 / math.sqrt(len(_X) - 3)
z_lo = z_prime - z_se
z_hi = z_prime + z_se
r_lo = (math.exp(2 * z_lo) - 1) / (math.exp(2 * z_lo) + 1)
r_hi = (math.exp(2 * z_hi) - 1) / (math.exp(2 * z_hi) + 1)
rlos.append(r_lo)
rhis.append(r_hi)
logger.info(" r = {:0.4f} ({:0.3f} - {:0.3f}) for these {} sample-sample relationships.".format(
_r, r_lo, r_hi, len(_X)))
# Calculate an overall r for comparison
r = ge.corr(X, Y, method=r_method)
# Build the plot
fig = plt.figure(figsize=fig_size)
ax = fig.add_subplot(111)
ax.axis([dist_min, dist_max, -1.0, 1.0])
ax.axhline(y=0, xmin=0, xmax=1, linestyle=':', color='gray')
# ax.axhline(y=spearman, xmin=0, xmax=1, linestyle='--', color='green')
# ax.axhline(y=kendall, xmin=0, xmax=1, linestyle='--', color='red')
oline = ax.axhline(y=r, xmin=0, xmax=1, linestyle='--', color='black', linewidth=2)
# sline, = ax.plot(ds, srs, linestyle='-', marker='o', color='green')
# kline, = ax.plot(ds, krs, linestyle='-', marker='o', color='red')
pline, = ax.plot(ds, prs, linestyle='-', marker='o', color='black', linewidth=2)
ax.vlines(x=ds, ymin=rlos, ymax=rhis, linewidth=1, color='black')
ax.hlines(y=rhis, xmin=[x - 1 for x in ds], xmax=[x + 1 for x in ds], linewidth=1, color='black')
ax.hlines(y=rlos, xmin=[x - 1 for x in ds], xmax=[x + 1 for x in ds], linewidth=1, color='black')
for i, n in enumerate(ns):
ax.annotate('n=', (ds[i], -0.90), ha='center')
ax.annotate(n, (ds[i], -0.97), ha='center')
ax.legend((pline, oline), ('Pearson r', 'all distances'), loc='upper center')
# ax.legend((pline, sline, kline, oline), ('Pearson r', 'Spearman r', 'Kendall tau', 'all distances'))
ax.set_xticks(tuple(np.append(dist_x_axis, dist_max)))
ax.set_title(title)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
if save_as is not None:
fig.savefig(save_as)
# fig.close()
return fig, ax
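# Editorial note on the confidence bounds computed in mantel_correlogram above:
# the Fisher transform is z' = 0.5 * ln((1 + r) / (1 - r)) = atanh(r), with standard
# error 1 / sqrt(n - 3); the bounds are mapped back to r-space via r = tanh(z'),
# which is what the (exp(2z) - 1) / (exp(2z) + 1) expressions implement.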
def conn_vs_expr_scatter(X, Y, xd, yd, save_as=None,
title='Connectivity and Expression', xlabel='expression', ylabel='connectivity',
logger=None):
""" Scatter the many values for Y vs X in background and yd vs xd in foreground (darker).
This is helpful to visualize connectivity values and expression values juxtaposed.
Overlaying xd and yd can show how a subset of X and Y may lie in a particular area
of the plot or have a slightly different correlation.
:param X: A vector of expression correlations
:param Y: A vector of connectivity values
:param xd: A vector of expression correlations, a subset of X to call out
:param yd: A vector of connectivity values, a subset of Y to call out
:param save_as: The file name if you'd like to save the plot generated
:param title: Override the default title
:param xlabel: Override the default x label
:param ylabel: Override the default y label
:param logger: Catch logging output and divert it wherever you like
:return: matplotlib (Figure, Axes) objects containing the regression plot
"""
# Attach to the proper logger
if logger is None:
logger = logging.getLogger('pygest')
logger.info("Plotting {} foreground points over {} background points.".format(
len(X), len(xd)
))
fig = plt.figure(figsize=(8, 5))
ax = fig.add_subplot(111)
ax.axis([min(X), max(X), -1.0, 1.0])
# Set the axes and plot the grid first
ax.axis([0.6, 1.0, -0.4, 1.0])
ax.axhline(y=0, xmin=0, xmax=1, linestyle=':', color='gray')
# plt.axvline(x=0, ymin=0, ymax=1, linestyle=':', color='gray')
ax.set_title(title)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
# Plot the points next
ax.plot(X, Y, '.', color='lightblue')
ax.plot(xd, yd, '.', color='gray')
# And put the fit lines over the top of everything.
m, b = np.polyfit(X, Y, 1)
md, bd = np.polyfit(xd, yd, 1)
ax.plot(X, m * X + b, '-', color='blue')
ax.plot(xd, md * xd + bd, '-', color='black')
# Add annotations
r = np.corrcoef(X, Y)[0, 1]
ax.annotate("all:", (0.61, 0.97), ha="left", va="top", color="blue")
ax.annotate("m = {:0.3f}".format(m), (0.62, 0.91), ha="left", va="top", color="blue")
ax.annotate("r = {:0.3f}".format(r), (0.62, 0.85), ha="left", va="top", color="blue")
rd = np.corrcoef(xd, yd)[0, 1]
ax.annotate("dist:", (0.61, 0.78), ha="left", va="top", color="black")
ax.annotate("m = {:0.3f}".format(md), (0.62, 0.72), ha="left", va="top", color="black")
ax.annotate("r = {:0.3f}".format(rd), (0.62, 0.66), ha="left", va="top", color="black")
if save_as is not None:
logger.info("Saving plot to {}".format(save_as))
fig.savefig(save_as)
return fig, ax
def heat_map(expression_df,
title="Heat Map", fig_size=(5, 8), c_map="Reds",
save_as=None, logger=None):
""" Build, save, and return a heat map plot.
:param pandas.DataFrame expression_df: A pandas DataFrame containing data for the plot
:param str title: Override the default plot title with one of your choosing
:param tuple fig_size: Dimensions (mostly relative) of figure generated
:param str c_map: A seaborn color scheme string
:param str save_as: If provided, the plot will be saved to this filename
:param logging.Logger logger: If provided, logging will be directed to this logger
:return fig, ax: matplotlib figure and axes objects
"""
# Attach to the proper logger
if logger is None:
logger = logging.getLogger('pygest')
fig, ax = plt.subplots(figsize=fig_size)
sns.set_style('white')
sns.heatmap(expression_df, annot=False, ax=ax, cmap=c_map)
ax.set_title(title)
if save_as is not None:
logger.info("Saving heat map to {}".format(save_as))
fig.savefig(save_as)
return fig, ax
def overlay_normal(ax, data, c="red"):
""" Provide a normal distribution Axes for overlay onto existing plot, based on data's mean and sd
:param matplotlib.Axes ax: The axes object to draw onto
:param data: The original data for basing our normal distribution
:param str c: A string referring to a seaborn color
:return: The same axes passed as an argument, but with a normal curve drawn over it
"""
norm_data = np.random.normal(loc=np.mean(data), scale=np.std(data), size=2048)
sns.kdeplot(norm_data, color=c, ax=ax)
# ax.vlines(x=np.mean(data), ymin=0.0, ymax=1.0, linewidth=0.5, color=c)
ax.vlines(x=np.mean(data) - (2 * np.std(data)), ymin=0, ymax=5.0, linewidth=0.5, color=c)
ax.vlines(x=np.mean(data), ymin=0, ymax=5.0, linewidth=0.5, color=c)
ax.vlines(x=np.mean(data) + (2 * np.std(data)), ymin=0, ymax=5.0, linewidth=0.5, color=c)
return ax
def distribution_plot(data,
title="Distribution", fig_size=(5, 5), c="red",
save_as=None, logger=None):
""" Build, save, and return a heat map plot.
:param pandas.DataFrame data: A pandas DataFrame containing data for the plot
:param str title: Override the default plot title with one of your choosing
:param tuple fig_size: Dimensions (mostly relative) of figure generated
:param str c: A seaborn color string
:param str save_as: If provided, the plot will be saved to this filename
:param logging.Logger logger: If provided, logging will be directed to this logger
:return fig, ax: matplotlib figure and axes objects
"""
# Density plots can take a long time to build with big samples; subsample if necessary
max_density_length = 1024
# Attach to the proper logger
if logger is None:
logger = logging.getLogger('pygest')
opp_c = "blue" if c == "red" else "red"
fig, ax = plt.subplots(figsize=fig_size)
sns.set_style('white')
sub_data = data if len(data) <= max_density_length else np.random.choice(data, max_density_length)
ax = overlay_normal(sns.distplot(sub_data, hist=True, rug=True, color=c), sub_data, c=opp_c)
ax.set_title(title)
if save_as is not None:
logger.info("Saving distribution plot to {}".format(save_as))
fig.savefig(save_as)
return fig, ax
def heat_and_density_plot(value_matrix, density_position='top',
title="Heat Map", fig_size=(6, 4), ratio=3, c_map="Reds",
save_as=None, logger=None):
""" Build, save, and return a heat map plot.
:param value_matrix: A DataFrame or matrix containing data for the plot
:param str density_position: Which edge gets the density_plot?
:param str title: Override the default plot title with one of your choosing
:param tuple fig_size: Dimensions (mostly relative) of figure generated
:param integer ratio: This number-to-one heat map to density plot size
:param str c_map: A seaborn color scheme string
:param str save_as: If provided, the plot will be saved to this filename
:param logging.Logger logger: If provided, logging will be directed to this logger
:return fig: matplotlib figure object
"""
# Attach to the proper logger
if logger is None:
logger = logging.getLogger('pygest')
fig = plt.figure(figsize=fig_size)
if density_position == 'left':
gs = plt.GridSpec(ratio, ratio + 1)
ax_main = fig.add_subplot(gs[:, 1:])
ax_dens = fig.add_subplot(gs[:, 0])
go_vertical = True
elif density_position == 'right':
gs = plt.GridSpec(ratio, ratio + 1)
ax_main = fig.add_subplot(gs[:, :-1])
ax_dens = fig.add_subplot(gs[:, -1])
go_vertical = True
elif density_position == 'bottom':
gs = plt.GridSpec(ratio + 1, ratio)
ax_main = fig.add_subplot(gs[:-1, :])
ax_dens = fig.add_subplot(gs[-1, :])
go_vertical = False
else: # density_position == 'top' or some invalid setting triggering 'top' default
# GridSpec is set with nrows, ncols
gs = plt.GridSpec(ratio + 1, ratio)
# For a top-density, use [all rows after the 0th x all columns] for main
ax_main = fig.add_subplot(gs[1:, :])
# For a top-density, use [0th row x all columns] for density plot
ax_dens = fig.add_subplot(gs[0, :])
go_vertical = False
# Density plots can take a long time to build with big samples; subsample if necessary
max_density_length = 1024
if isinstance(value_matrix, pd.DataFrame):
value_matrix = value_matrix.to_numpy()
if value_matrix.shape[0] == value_matrix.shape[1]:
value_vector = value_matrix[np.tril_indices(n=value_matrix.shape[0], k=-1)]
else:
value_vector = value_matrix.flatten()
if len(value_vector) <= max_density_length:
sub_vector = value_vector
else:
sub_vector = np.random.choice(value_vector, max_density_length)
sns.set_style('white')
c = c_map.lower()[:-1]
sns.heatmap(value_matrix, annot=False, ax=ax_main, cmap=c_map,
xticklabels=False, yticklabels=False)
sns.set_style('white')
ax_dens = overlay_normal(sns.distplot(
sub_vector, hist=True, rug=True, color=c, ax=ax_dens, vertical=go_vertical
), sub_vector, c=c)
ax_dens.set_title(title)
if save_as is not None:
logger.info("Saving heat map to {}".format(save_as))
fig.savefig(save_as)
return fig
def whack_a_probe_plot(donor, hemisphere, samples, conns, conss=None, nulls=None, fig_size=(16, 9),
save_as=None, logger=None):
""" Plot increasing correlations by different whack-a-probe algorithms.
:param donor: The donor of interest
:param hemisphere: The donor's hemisphere of interest
:param samples: The subset (cor, sub, all) of donor's samples to represent
:param conns: A list of tuples, each tuple (name, DataFrame), each DataFrame representing rising correlations
:param conss: A list of tuples, each tuple (name, DataFrame), each DataFrame representing rising correlations
:param nulls: A list of tuples, each tuple (name, DataFrame), each DataFrame representing rising correlations
:param fig_size: The size, in inches, of the figure (width, height)
:param str save_as: If provided, the plot will be saved to this filename
:param logging.Logger logger: If provided, logging will be directed to this logger
:return: matplotlib figure object
"""
if logger is None:
logger = logging.getLogger("pygest")
fig, ax = plt.subplots(figsize=fig_size)
# Plot a single horizontal line at y=0
ax.axhline(0, 0, 17000, color='gray')
# Finally, plot the real curves
def plot_curves(the_curve, ls, lc):
if 'max' in the_curve[0]:
legend_label = "{}, max r={:0.3f}".format(
the_curve[0][6:], max(list(the_curve[1]['r' if 'r' in the_curve[1].columns else 'b']))
)
elif 'min' in the_curve[0]:
legend_label = "{}, min r={:0.3f}".format(
the_curve[0][6:], min(list(the_curve[1]['r' if 'r' in the_curve[1].columns else 'b']))
)
else:
legend_label = the_curve[0][6:]
ax.plot(list(the_curve[1].index), list(the_curve[1]['r' if 'r' in the_curve[1].columns else 'b']),
label=legend_label, linestyle=ls, color=lc)
# Plot the nulls first, so they are in the background
print("nulls = ".format(nulls))
if nulls is not None and len(nulls) > 0:
for a_null in nulls:
if 'smrt' in a_null[0]:
plot_curves(a_null, ls=':', lc='lightgray')
elif 'once' in a_null[0]:
plot_curves(a_null, ls=':', lc='lightgray')
else:
plot_curves(a_null, ls=':', lc='yellow')
# Also, plot the averaged null, our expected tortured r-value if we are only begging noise to confess
def plot_mean_curves(mm, f, the_nulls):
the_filter = [mm in x[0] for x in the_nulls]
if sum(the_filter) > 0:
the_nulls = [i for (i, v) in zip(the_nulls, the_filter) if v]
mean_the_nulls = np.mean([x[1]['r' if 'r' in x[1].columns else 'b'] for x in the_nulls], axis=0)
ll = "{}, mean {} r={:0.3f}".format('shuffled', mm, f(mean_the_nulls))
ax.plot(list(the_nulls[0][1].index), mean_the_nulls, linestyle=':', color='darkgray', label=ll)
plot_mean_curves("max", max, nulls)
plot_mean_curves("min", min, nulls)
if conns is not None and len(conns) > 0:
for a_real in conns:
if 'smrt' in a_real[0]:
plot_curves(a_real, ls='-', lc='black')
elif 'once' in a_real[0]:
plot_curves(a_real, ls='--', lc='black')
else:
plot_curves(a_real, ls='-', lc='yellow')
if conss is not None and len(conss) > 0:
for a_real in conss:
if 'smrt' in a_real[0]:
plot_curves(a_real, ls='-', lc='green')
elif 'once' in a_real[0]:
plot_curves(a_real, ls='--', lc='green')
else:
plot_curves(a_real, ls='-', lc='yellow')
# Tweak the legend, then add it to the axes, too
def leg_sort(t):
""" Sort the legend in a way that maps to peaks of lines visually. """
score = 0
if 'smrt' in t[0]:
score += 3
elif 'once' in t[0]:
score += 2
else:
score += 1
if 'max' in t[0]:
score *= -1
elif 'min' in t[0]:
score *= 1
return score
handles, labels = ax.get_legend_handles_labels()
# sort both labels and handles by labels
labels, handles = zip(*sorted(zip(labels, handles), key=leg_sort))
ax.legend(handles, labels, loc=2)
# Finish it off with a title
ax.set_title("{}, {} hemisphere, {} set".format(donor, hemisphere, samples))
if save_as is not None:
logger.info("Saving whack-a-probe plot to {}".format(save_as))
fig.savefig(save_as)
return fig
def light_dark_palettes(central_palette=None):
""" Return two palettes, one lighter and one darker than central_palette, but with the same scheme.
:param central_palette: A seaborn color palette, defaults to seaborn's colorblind palette.
:returns light_palette, dark_palette: Two palettes, one lighter and one darker
"""
# Generate colorblind palette, then adjust intensity up and down slightly for light and dark versions
# Seaborn handles palettes, strings, or even None to generate a palette.
pal = sns.color_palette(central_palette)
# Reduce the intensity of each color channel to darken the overall color
pal_dark = [(
c[0] - (c[0] / 3),
c[1] - (c[1] / 3),
c[2] - (c[2] / 3)
) for c in pal]
# Increase the intensity of each color channel to lighten the overall color
pal_light = [(
c[0] + ((1.0 - c[0]) / 3),
c[1] + ((1.0 - c[1]) / 3),
c[2] + ((1.0 - c[2]) / 3)
) for c in pal]
return pal_light, pal_dark
def curve_properties(df, shuffle_name, palette="colorblind"):
""" Return appropriate properties for curves and boxplots representing a given shuffle type.
:param pandas.DataFrame df: A dataframe with a 'path' column holding paths to result files by 'shuf'
:param str shuffle_name: The name of the shuffle type key
:param palette: The name of the seaborn color palette to use
:returns dict: dict with labelled properties of a curve
"""
# print("DEBUG: df, return value from curve_properties(df, {})".format(shuffle_name))
# print("df is {}; has {} {}-paths.".format(df.shape, df[df['shuf'] == shuffle_name].shape, shuffle_name))
# print(df.shape)
# Generate color palettes
pal_light, pal_dark = light_dark_palettes(palette)
# 0: blue, 1: orange, 2: green, 3: red, 4: violet, 5: brown, 6: pink, 7: gray, 8: yellow, 9: aqua
property_dict = {
"files": list(df.loc[df['shuf'] == shuffle_name, 'path']),
"shuf": shuffle_name,
}
if shuffle_name == "none":
property_dict.update({"linestyle": "-", "color": "black", "light_color": "gray", })
elif shuffle_name.startswith("be"):
property_dict.update({"linestyle": ":", "color": pal_dark[0], "light_color": pal_light[0], }) # 0 == blue
elif shuffle_name == "dist":
property_dict.update({"linestyle": ":", "color": pal_dark[3], "light_color": pal_light[3], }) # 3 == red
elif shuffle_name == "agno":
property_dict.update({"linestyle": ":", "color": pal_dark[6], "light_color": pal_light[6], }) # 6 == pink
elif shuffle_name == "smsh":
property_dict.update({"linestyle": ":", "color": pal_dark[5], "light_color": pal_light[5], }) # 5 == brown
elif shuffle_name == "edge":
property_dict.update({"linestyle": ":", "color": pal_dark[2], "light_color": pal_light[2], }) # 2 == green
else:
property_dict.update({"linestyle": ".", "color": pal_dark[7], "light_color": pal_light[7], }) # 7 == gray
# print("DEBUG: property_dict, return value from curve_properties(df, {})".format(shuffle_name))
# print("property_dict has {} files for shuf {}".format(len(property_dict['files']), shuffle_name))
return property_dict
def push_plot_via_dict(data, d):
""" Use settings from a json file to specify a push plot.
:param data: and instance of the pygest.Data object
:param d: a dictionary specifying configuration for plot
:return: 0 for success, integer error code for failure
"""
print("Pretending to make a plot from {}".format(d))
colormap = ['black', 'blue', 'red', 'green', 'gray', 'orange', 'violet']
plottables = []
for subgroup_spec in d['intra'].keys():
for i, subgroup in enumerate(d['intra'][subgroup_spec]):
bids_filter = d['controls'].copy()
bids_filter[subgroup_spec] = subgroup
plottables.append({
'files': data.derivatives(bids_filter),
'color': colormap[i % len(colormap)],
'linestyle': ':',
'label_keys': [subgroup_spec]
})
for curve in plottables:
print("Curves in {}".format(os.path.join(d['outdir'], d['filename'])))
print(curve)
fig, ax = push_plot(plottables, title=d['title'])
fig.savefig(os.path.join(d['outdir'], d['filename']))
return 0
def push_vs_null_plot(data, donor, hem, samp, algo='smrt', comp='conn', mask='none',
label_keys=None):
""" Use reasonable defaults to generate a push_plot for a particular dataset.
This function does all of the gathering of files and generation of lists
for sending to push_plot.
:param data: an instance of the pygest.Data object
:param donor: a string representing the donor of interest
:param hem: a single character representing left or right hemisphere
:param samp: 'cor' or 'sub' to indicate which sample set to use
:param algo: 'smrt' for the smart efficient algorithm, 'once' or 'evry' for alternatives
:param comp: 'conn' for connectivity, or 'cons' for connectivity similarity comparisons
:param mask: 'none' for full data, or 'fine', 'coarse' or an integer for masked data
:param label_keys: A list of keys can limit the size of the legend
:return figure: axes of the plot
"""
the_filters = {'sub': donor, 'hem': hem, 'samp': samp, 'algo': algo, 'comp': comp,
'mask': mask, 'adj': 'none', 'exclusions': ['test', 'NULL', ], }
# Get results for actual values and three types of shuffles
the_results = {}
for result_type in ['none', 'agno', 'dist', 'edge', ]:
# Ask Data for a list of files that match our filters
the_results[result_type] = data.derivatives(the_filters, shuffle=result_type)
print("Retrieved {} results for {} shuffles.".format(len(the_results[result_type]), result_type))
# Set up several sets of curves to be plotted
plottables = [
{'files': the_results['agno'], 'color': 'gray', 'linestyle': ':',
'label': 'shuffle (n={})'.format(len(the_results['agno']))},
{'files': the_results['dist'], 'color': 'red', 'linestyle': ':',
'label': 'weighted (n={})'.format(len(the_results['dist']))},
{'files': the_results['edge'], 'color': 'blue', 'linestyle': ':',
'label': 'edges (n={})'.format(len(the_results['edge']))},
{'files': the_results['none'], 'color': 'black', 'linestyle': '-',
'label_keys': ['comp', ]},
]
the_title = "{}_{}_{}_{}_{} actual vs shuffling".format(donor, hem, samp, comp, mask)
return push_plot(plottables, the_title, label_keys=label_keys, fig_size=(8, 5))
def push_plot(push_sets, title="Push Plot", label_keys=None, plot_overlaps=False, fig_size=(16, 12),
save_as=None, push_x_to=None):
""" Draw a plot with multiple push results overlaid for comparison.
:param push_sets: a list of dicts, each dict contains ('files', optional 'color', optional 'linestyle')
:param title: override the default "Push Plot" title with something more descriptive
:param label_keys: if specified, labels will be generated from these keys and the files in push_sets
:param plot_overlaps: If true, calculate pct_overlap for each group and annotate the plot with them
:param fig_size: override the default (16, 9) fig_size
:param save_as: if specified, the plot will be drawn into the file provided
:param push_x_to: if specified, an optimization from (0 to 100) will plot as (push_x_to - 100 to push_x_to)
:return: figure, axes of the plot
"""
fig, ax = plt.subplots(figsize=fig_size)
fig.tight_layout()
# Plot a single horizontal line at y=0
ax.axhline(0, 0, 17000, color='gray')
if len(push_sets) == 0:
return fig, ax
# Plot each push_set
ls = '-'
lc = 'black'
label = ''
curve_list = []
for i, push_set in enumerate(push_sets):
if 'linestyle' in push_set:
ls = push_set['linestyle']
if 'color' in push_set:
lc = push_set['color']
if 'label' in push_set:
label = push_set['label']
if 'label_keys' in push_set:
label_keys = push_set['label_keys']
if len(push_set) > 0:
ax, df = plot_pushes(push_set['files'], axes=ax, label=label, label_keys=label_keys,
linestyle=ls, color=lc, push_x_to=push_x_to)
df['push_set'] = i
if len(df) > 0:
curve_list.append(df)
all_curves = pd.concat(curve_list, axis=0, sort=True)
# Append summary statistics to a label
def label_add_summary(x, d):
if (len(d) == 0) or (len(d['best_score']) == 0):
return "{} empty".format(x)
if plot_overlaps and (len(d['f']) > 1):
return "{}={:0.2f} with {:0.1%} overlap (n={})".format(
x, np.mean(d['best_score']), pct_similarity(d['f']), len(d.index)
)
else:
return "{}={:0.2f} (n={})".format(
x, np.mean(d['best_score']), len(d.index)
)
# Tweak the legend, then add it to the axes, too
def legend_sort_val(t):
""" Sort the legend in a way that maps to peaks of lines visually. """
val = re.compile(r"^.*r=(\S+) .*$").search(t[0]).groups()[0]
# Return the negative so high values are first in the vertical legend.
return float(val) * -1.0
max_handles = []
min_handles = []
max_labels = []
min_labels = []
null_handles = []
null_labels = []
handles, labels = ax.get_legend_handles_labels()
# Add summary statistics to labels
labels = [label_add_summary(x, all_curves[all_curves['group'] == x]) for x in labels]
# sort both labels and handles by labels
if len(labels) > 0 and len(handles) > 0:
labels, handles = zip(*sorted(zip(labels, handles), key=legend_sort_val))
for i, l in enumerate(labels):
if "max" in l:
max_handles.append(handles[i])
max_labels.append(labels[i])
elif "min" in l:
min_handles.append(handles[i])
min_labels.append(labels[i])
else:
null_handles.append(handles[i])
null_labels.append(labels[i])
# Add a full legend (that will be emptied) and three separate legends with appropriate labels in each.
ax.legend(handles, labels, loc=7)
if len(max_labels) > 0:
ax.add_artist(ax.legend(max_handles, max_labels, loc=2))
if len(null_labels) > 0:
ax.add_artist(ax.legend(null_handles, null_labels, loc=6))
if len(min_labels) > 0:
ax.add_artist(ax.legend(min_handles, min_labels, loc=3))
# Finish it off with a title
ax.set_title(title)
if save_as is not None:
fig.savefig(save_as)
return fig, ax
def plot_pushes(files, axes=None, label='', label_keys=None, linestyle='-', color='black', push_x_to=None):
""" Plot the push results from a list of tsv files onto axes. This plots as many curves
as are in files, and is called repeatedly (each time with different curves) on a
single figure by push_plot.
:param list files: A list of full file paths to tsv data holding push results
:param axes: matplotlib axes for plotting to
:param label: if supplied, override the calculated label for use in the legend for this set of results
:param label_keys: if supplied, calculated the label from these fields
:param linestyle: this linestyle will be used to plot these results
:param color: this color will be used to plot these results
:param push_x_to: if specified, an optimization from (0 to 100) will plot as (push_x_to - 100 to push_x_to)
:returns axes, pd.DataFrame: the axes containing the representations of results in files
"""
if axes is None:
fig, axes = plt.subplots()
if len(files) == 0:
return axes, pd.DataFrame()
# Remember values for duplicate labels so we can average them at the end if necessary.
summary_list = []
# label_values = {}
# label_files = {}
for f in files:
df = pd.read_csv(f, sep='\t', index_col=0)
measure = 'r' if 'r' in df.columns else 'b'
summary = {'f': f, 'measure': measure, 'tgt': bids_val('tgt', f)}
if push_x_to is not None:
# Shift indices higher to allow alignment of variable-length plots to the right side.
df.index = df.index + (push_x_to - df.index.max())
if summary['tgt'] == 'max':
the_best_score = df[measure].max()
the_best_index = df[measure][5:].idxmax()
elif summary['tgt'] == 'min':
the_best_score = df[measure].min()
the_best_index = df[measure][5:].idxmin()
else:
the_best_score = 0.0
the_best_index = 0
summary['best_score'] = the_best_score
summary['best_index'] = the_best_index
# If a label is not provided, create it, and in a way we can modify it later.
# These labels are not tied to the axes, yet, and exist only in the local function
if label == '':
if label_keys is None:
# default values, if they aren't specified
label_keys = ['tgt', 'alg', 'msk', ]
label_group = "_".join([short_cmp(bids_val(k, f)) for k in label_keys])
label_group = label_group + ", {} {}".format(bids_val('tgt', f), measure)
# try:
# label_values[label_group].append(best_score)
# except KeyError:
# # If the label_group does not yet appear in label_values, start the list
# label_values[label_group] = [best_score, ]
# try:
# label_files[label_group].append(f)
# except KeyError:
# # If the label_group does not yet appear in label_files, start the list
# label_files[label_group] = [f, ]
else:
label_group = label
summary['group'] = label_group
# Plot the curve on the axes
real_handles, axes_labels = axes.get_legend_handles_labels()
if label_group in [x.split("=")[0] for x in axes_labels]:
# If a label already exists, just plot the line without a label.
axes.plot(list(df.index), list(df[measure]), linestyle=linestyle, color=color)
else:
# If there's no label, make one and plot the line with it.
axes.plot(list(df.index), list(df[measure]), linestyle=linestyle, color=color, label=label_group)
summary_list.append(summary)
summaries = | pd.DataFrame(summary_list) | pandas.DataFrame |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
@version:
@author: li
@file: factor_operation_capacity.py
@time: 2019-05-30
"""
import gc
import sys
sys.path.append('../')
sys.path.append('../../')
sys.path.append('../../../')
import six, pdb
import pandas as pd
from pandas.io.json import json_normalize
from utilities.singleton import Singleton
# from basic_derivation import app
# from ultron.cluster.invoke.cache_data import cache_data
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
@six.add_metaclass(Singleton)
class FactorBasicDerivation(object):
"""
基础衍生类因子
"""
def __init__(self):
__str__ = 'factor_basic_derivation'
self.name = '基础衍生'
self.factor_type1 = '基础衍生'
self.factor_type2 = '基础衍生'
self.description = '基础衍生类因子'
@staticmethod
def EBIT(tp_derivation, factor_derivation, dependencies=['total_profit', 'interest_expense', 'interest_income', 'financial_expense']):
"""
:name: 息税前利润(MRQ)
:desc: [EBIT_反推法]息税前利润(MRQ) = 利润总额 + 利息支出 - 利息收入
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
func = lambda x: (x[0] + x[1] - x[2]) if x[1] is not None and x[2] is not None else (x[0] + x[3] if x[3] is not None else None)
management['EBIT'] = management[dependencies].apply(func, axis=1)
# management = management.drop(dependencies, axis=1)
management = management[['EBIT']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
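    # Illustrative usage sketch (not part of the original file; assumes both frames are indexed
    # by `security_code`): the factor methods below are chained in dependency order, each one
    # merging its new column back into `factor_derivation`, e.g.
    #   factor_derivation = FactorBasicDerivation.EBIT(tp_derivation, factor_derivation)
    #   factor_derivation = FactorBasicDerivation.EBITDA(tp_derivation, factor_derivation)
    #   factor_derivation = FactorBasicDerivation.DepAndAmo(tp_derivation, factor_derivation)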
@staticmethod
def EBITDA(tp_derivation, factor_derivation, dependencies=['total_profit', 'income_tax'],
dependency=['EBIT']):
"""
:name: 息前税后利润(MRQ)
:desc: 息前税后利润(MRQ)=EBIT(反推法)*(if 所得税&利润总额都>0,则1-所得税率,否则为1),所得税税率 = 所得税/ 利润总额
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management2 = factor_derivation.loc[:, dependency]
management = pd.merge(management, management2, how='outer', on='security_code')
management = management.fillna(0)
if len(management) <= 0:
return None
dependency = dependency + dependencies
func = lambda x: None if x[0] is None or x[1] is None or x[2] is None or x[1] == 0 else (x[0] * (1 - x[2] / x[1]) if x[1] > 0 and x[2] > 0 else x[0])
management['EBITDA'] = management[dependency].apply(func, axis=1)
# management = management.drop(dependencies, axis=1)
management = management[['EBITDA']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def DepAndAmo(tp_derivation, factor_derivation, dependencies=['fixed_assets_depreciation',
'intangible_assets_amortization',
'defferred_expense_amortization']):
"""
:name: 折旧和摊销(MRQ)
:desc: 固定资产折旧 + 无形资产摊销 + 长期待摊费用摊销
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management = management.fillna(0)
if len(management) <= 0:
return None
func = lambda x: x[0] + x[1] + x[2] if x[0] is not None and x[1] is not None and x[2] is not None else None
management['DepAndAmo'] = management[dependencies].apply(func, axis=1)
# management = management.drop(dependencies, axis=1)
management = management[['DepAndAmo']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def FCFF(tp_derivation, factor_derivation, dependencies=['total_current_assets',
'cash_equivalents',
'total_current_liability',
'shortterm_loan',
'shortterm_bonds_payable',
'non_current_liability_in_one_year',
'total_current_assets_pre',
'cash_equivalents_pre',
'total_current_liability_pre',
'shortterm_loan_pre',
'shortterm_bonds_payable_pre',
'non_current_liability_in_one_year_pre',
'fix_intan_other_asset_acqui_cash',
],
dependency=['EBITDA', 'DepAndAmo']):
"""
:name: 企业自由现金流量(MRQ)
:desc: 息前税后利润+折旧与摊销-营运资本增加-资本支出 = 息前税后利润+ 折旧与摊销-营运资本增加-构建固定无形和长期资产支付的现金, 营运资本 = 流动资产-流动负债, 营运资金=(流动资产-货币资金)-(流动负债-短期借款-应付短期债券-一年内到期的长期借款-一年内到期的应付债券)
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management2 = factor_derivation.loc[:, dependency]
management = pd.merge(management, management2, how='outer', on='security_code')
management = management.fillna(0)
if len(management) <= 0:
return None
dependency = dependency + dependencies
        # 营运资本增加 = 本期营运资金 - 上期营运资金;资本支出 = 构建固定无形和长期资产支付的现金
        func = lambda x: (x[0] + x[1]
                          - ((x[2] - x[3] - x[4] + x[5] + x[6] + x[7])
                             - (x[8] - x[9] - x[10] + x[11] + x[12] + x[13]))
                          - x[14]) if all(i is not None for i in x) else None
management['FCFF'] = management[dependency].apply(func, axis=1)
management = management[['FCFF']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def FCFE(tp_derivation, factor_derivation, dependencies=['borrowing_repayment',
'cash_from_borrowing',
'cash_from_bonds_issue'],
dependency=['FCFF']):
"""
:name: 股东自由现金流量(MRQ)
:desc: 企业自由现金流量-偿还债务所支付的现金+取得借款收到的现金+发行债券所收到的现金(MRQ)
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management2 = factor_derivation.loc[:, dependency]
management = pd.merge(management, management2, how='outer', on='security_code')
management = management.fillna(0)
if len(management) <= 0:
return None
dependency = dependency + dependencies
func = lambda x: x[0] - x[1] + x[2] + x[3] if x[0] is not None and x[1] is not None and \
x[2] is not None and x[3] is not None else None
management['FCFE'] = management[dependency].apply(func, axis=1)
management = management[['FCFE']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def NonRecGainLoss(tp_derivation, factor_derivation, dependencies=['np_parent_company_owners', 'np_cut']):
"""
:name: 非经常性损益(MRQ)
:desc: 归属母公司净利润(MRQ) - 扣非净利润(MRQ)
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management = management.fillna(0)
if len(management) <= 0:
return None
func = lambda x: x[0] - x[1] if x[0] is not None and x[1] is not None else None
management['NonRecGainLoss'] = management[dependencies].apply(func, axis=1)
management = management[['NonRecGainLoss']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def NetOptInc(tp_derivation, factor_derivation, sw_industry,
dependencies_er=['total_operating_revenue', 'total_operating_cost'],
dependencies_yh=['commission_income', 'net_profit', 'other_business_profits', 'operating_cost'],
dependencies_zq=['commission_income', 'net_profit', 'other_operating_revenue', 'operating_cost'],
dependencies_bx=['operating_revenue', 'operating_cost', 'fair_value_variable_income',
'investment_income', 'exchange_income']):
"""
:name: 经营活动净收益(MRQ)
        :desc: 新准则(一般企业):营业总收入-营业总成本
:unit: 元
:view_dimension: 10000
"""
industry2_set = ['430100', '370100', '410400', '450500', '640500', '510100', '620500', '610200', '330200',
'280400', '620400', '450200', '270500', '610300', '280300', '360300', '410100', '370400',
'280200', '730200', '710200', '720200', '640400', '270300', '110400', '220100', '240300',
'270400', '710100', '420100', '420500', '420400', '370600', '720100', '640200', '220400',
'330100', '630200', '610100', '370300', '410300', '220300', '640100', '490300', '450300',
'220200', '370200', '460200', '420200', '460100', '360100', '620300', '110500', '650300',
'420600', '460300', '720300', '270200', '630400', '410200', '280100', '210200', '420700',
'650200', '340300', '220600', '110300', '350100', '620100', '210300', '240200', '340400',
'240500', '360200', '270100', '230100', '370500', '110100', '460400', '110700', '110200',
'630300', '450400', '220500', '730100', '640300', '630100', '240400', '420800', '650100',
'350200', '620200', '210400', '420300', '110800', '360400', '650400', '110600', '460500',
'430200', '210100', '240100', '250100', '310300', '320200', '310400', '310200', '320100',
'260500', '250200', '450100', '470200', '260200', '260400', '260100', '440200', '470400',
'310100', '260300', '220700', '470300', '470100', '340100', '340200', '230200']
dependencies = list(set(dependencies_er + dependencies_yh + dependencies_bx + dependencies_zq))
management = tp_derivation.loc[:, dependencies]
management = management.fillna(0)
management = pd.merge(management, sw_industry, how='outer', on='security_code').set_index('security_code')
if len(management) <= 0:
return None
management_tm = pd.DataFrame()
func = lambda x: x[0] + x[1] + x[2] - x[3] if x[0] is not None and \
x[1] is not None and \
x[2] is not None and \
x[3] is not None else None
# 银行 ['440100', '480100']
        management_yh = management[management['industry_code2'].isin(['440100', '480100'])].copy()
management_yh['NetOptInc'] = management_yh[dependencies_yh].apply(func, axis=1)
management_tm = management_tm.append(management_yh)
# 证券['440300', '490100']
        management_zq = management[management['industry_code2'].isin(['440300', '490100'])].copy()
management_zq['NetOptInc'] = management_zq[dependencies_zq].apply(func, axis=1)
management_tm = management_tm.append(management_zq)
func1 = lambda x: x[0] - x[1] - x[2] - x[3] - x[4] if x[0] is not None and \
x[1] is not None and \
x[2] is not None and \
x[3] is not None and \
x[4] is not None else None
# 保险['440400', '490200']
        management_bx = management[management['industry_code2'].isin(['440400', '490200'])].copy()
management_bx['NetOptInc'] = management_bx[dependencies_bx].apply(func1, axis=1)
management_tm = management_tm.append(management_bx)
func2 = lambda x: None if x[0] is None else (x[0] if x[1] is None else x[0] - x[1])
        management_er = management[management['industry_code2'].isin(industry2_set)].copy()
management_er['NetOptInc'] = management_er[dependencies_er].apply(func2, axis=1)
management_tm = management_tm.append(management_er)
dependencies = dependencies + ['industry_code2']
management_tm = management_tm[['NetOptInc']]
factor_derivation = pd.merge(factor_derivation, management_tm, how='outer', on="security_code")
return factor_derivation
@staticmethod
def WorkingCap(tp_derivation, factor_derivation, dependencies=['total_current_assets',
'total_current_liability']):
"""
:name: 运营资本(MRQ)
:desc: 流动资产(MRQ)-流动负债(MRQ)
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management = management.fillna(0)
if len(management) <= 0:
return None
func = lambda x: x[0] - x[1] if x[0] is not None and x[1] is not None else None
management['WorkingCap'] = management[dependencies].apply(func, axis=1)
management = management[['WorkingCap']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def TangibleAssets(tp_derivation, factor_derivation, dependencies=['equities_parent_company_owners',
'intangible_assets',
'development_expenditure',
'good_will',
'long_deferred_expense',
'deferred_tax_assets']):
"""
:name: 有形资产(MRQ)
:desc: 股东权益(不含少数股东权益)- (无形资产 + 开发支出 + 商誉 + 长期待摊费用 + 递延所得税资产)
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management = management.fillna(0)
if len(management) <= 0:
return None
func = lambda x: x[0] - (x[1] + x[2] + x[3] + x[4] + x[5]) if x[0] is not None and \
x[1] is not None and \
x[2] is not None and \
x[3] is not None and \
x[4] is not None and \
x[5] is not None else None
management['TangibleAssets'] = management[dependencies].apply(func, axis=1)
management = management[['TangibleAssets']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def RetainedEarnings(tp_derivation, factor_derivation, dependencies=['surplus_reserve_fund',
'retained_profit']):
"""
:name: 留存收益(MRQ)
:desc: 盈余公积MRQ + 未分配利润MRQ
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management = management.fillna(0)
if len(management) <= 0:
return None
func = lambda x: x[0] + x[1] if x[0] is not None and x[1] is not None else None
management['RetainedEarnings'] = management[dependencies].apply(func, axis=1)
management = management[['RetainedEarnings']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def InterestFreeCurLb(tp_derivation, factor_derivation, dependencies=['bill_receivable',
'accounts_payable',
'advance_peceipts',
'salaries_payable',
'taxs_payable',
'accrued_expenses',
'other_payable',
'long_term_deferred_income',
'other_current_liability',
]):
"""
:name: 无息流动负债(MRQ)
:desc: 无息流动负债 = 应收票据+应付帐款+预收款项+应付职工薪酬+应交税费+其他应付款+预提费用+递延收益+其他流动负债
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management = management.fillna(0)
if len(management) <= 0:
return None
        func = lambda x: (x[0] + x[1] + x[2] + x[3] + x[4] + x[5] + x[6] + x[7] + x[8]
                          if all(i is not None for i in x) else None)
management['InterestFreeCurLb'] = management[dependencies].apply(func, axis=1)
management = management[['InterestFreeCurLb']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def InterestFreeNonCurLb(tp_derivation, factor_derivation, dependencies=['total_non_current_liability',
'longterm_loan',
'bonds_payable']):
"""
:name: 无息非流动负债(MRQ)
:desc: 非流动负债合计 - 长期借款 - 应付债券
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management = management.fillna(0)
if len(management) <= 0:
return None
func = lambda x: x[0] - x[1] - x[2] if x[0] is not None and x[1] is not None and x[2] is not None else None
management['InterestFreeNonCurLb'] = management[dependencies].apply(func, axis=1)
management = management[['InterestFreeNonCurLb']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def InterestBearingLiabilities(tp_derivation, factor_derivation, dependencies=['total_liability'],
dependency=['InterestFreeCurLb', 'InterestFreeNonCurLb']):
"""
:name: 带息负债(MRQ)
:desc: 负债合计-无息流动负债-无息非流动负债(MRQ)
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management1 = factor_derivation.loc[:, dependency]
management = pd.merge(management, management1, how='outer', on="security_code")
management = management.fillna(0)
if len(management) <= 0:
return None
dependency = dependencies + dependency
func = lambda x: x[0] - x[1] - x[2] if x[0] is not None and \
x[1] is not None and \
x[2] is not None else None
management['InterestBearingLiabilities'] = management[dependency].apply(func, axis=1)
management = management[['InterestBearingLiabilities']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def NetDebt(tp_derivation, factor_derivation, dependencies=['cash_equivalents'],
dependency=['InterestBearingLiabilities']):
"""
:name: 净债务(MRQ)
:desc: 净债务 = 带息债务(MRQ) - 货币资金(MRQ)。 其中,带息负债 = 短期借款 + 一年内到期的长期负债 + 长期借款 + 应付债券 + 应付利息
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management1 = factor_derivation.loc[:, dependency]
management = pd.merge(management, management1, how='outer', on="security_code")
management = management.fillna(0)
if len(management) <= 0:
return None
dependency = dependency + dependencies
func = lambda x: x[0] - x[1] if x[0] is not None and x[1] is not None else None
management['NetDebt'] = management[dependency].apply(func, axis=1)
management = management[['NetDebt']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def EquityPC(tp_derivation, factor_derivation, dependencies=['equities_parent_company_owners']):
"""
:name: 归属于母公司的股东权益(MRQ)
:desc: 归属于母公司的股东权益(MRQ)
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management = management.fillna(0)
if len(management) <= 0:
return None
management = management.rename(columns={'equities_parent_company_owners': 'EquityPC'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def TotalInvestedCap(tp_derivation, factor_derivation, dependencies=['total_owner_equities' ],
dependency=['InterestBearingLiabilities']):
"""
:name: 全部投入资本(MRQ)
:desc: 股东权益+(负债合计-无息流动负债-无息长期负债)
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management1 = factor_derivation.loc[:, dependency]
management = pd.merge(management, management1, how='outer', on="security_code")
management = management.fillna(0)
dependency = dependency + dependencies
if len(management) <= 0:
return None
func = lambda x: x[0] + x[1] if x[0] is not None and x[1] is not None else None
management['TotalInvestedCap'] = management[dependency].apply(func, axis=1)
management = management[['TotalInvestedCap']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def TotalAssets(tp_derivation, factor_derivation, dependencies=['total_assets']):
"""
:name: 资产总计(MRQ)
:desc: 资产总计(MRQ) balance
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
management = management.rename(columns={'total_assets': 'TotalAssets'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def TotalFixedAssets(tp_derivation, factor_derivation, dependencies=['total_fixed_assets_liquidation']):
"""
:name: 固定资产合计(MRQ)
:desc: 固定资产合计(MRQ)
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
management = management.rename(columns={'total_fixed_assets_liquidation': 'TotalFixedAssets'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def TotalLib(tp_derivation, factor_derivation, dependencies=['total_liability']):
"""
:name: 负债合计(MRQ)
:desc: 负债合计(MRQ)
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
management = management.rename(columns={'total_liability': 'TotalLib'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def ShEquity(tp_derivation, factor_derivation, dependencies=['total_owner_equities']):
"""
:name: 股东权益(MRQ)
:desc: 股东权益(MRQ)
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
management = management.rename(columns={'total_owner_equities': 'ShEquity'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def CashAndCashEqu(tp_derivation, factor_derivation, dependencies=['cash_and_equivalents_at_end']):
"""
:name: 期末现金及现金等价物(MRQ)
:desc: 期末现金及现金等价物(MRQ)
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
management = management.rename(columns={'cash_and_equivalents_at_end': 'CashAndCashEqu'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def SalesTTM(tp_derivation, factor_derivation, dependencies=['total_operating_revenue']):
"""
:name: 营业总收入(TTM)
:desc: 根据截止指定日已披露的最新报告期“营业总收入”计算:(1)最新报告期是年报。则TTM=年报;(2)最新报告期不是年报,Q则TTM=本期+(上年年报-上年同期合并数),如果上年年报非空,本期、上年同期台并数存在空值,则返回上年年报。
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
management = management.rename(columns={'total_operating_revenue': 'SalesTTM'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def TotalOptCostTTM(tp_derivation, factor_derivation, dependencies=['total_operating_cost']):
"""
:name: 营业总成本(TTM)
:desc: 根据截止指定日已披露的最新报告期“营业总成本”计算:(1)最新报告期是年报。则TTM=年报;(2)最新报告期不是年报,Q则TTM=本期+(上年年报-上年同期合并数),如果上年年报非空,本期、上年同期台并数存在空值,则返回上年年报。
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
management = management.rename(columns={'total_operating_cost': 'TotalOptCostTTM'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def OptIncTTM(tp_derivation, factor_derivation, dependencies=['operating_revenue']):
"""
:name: 营业收入(TTM)
:desc: 根据截止指定日已披露的最新报告期“营业收入”计算:(1)最新报告期是年报。则TTM=年报;(2)最新报告期不是年报,Q则TTM=本期+(上年年报-上年同期合并数),如果上年年报非空,本期、上年同期台并数存在空值,则返回上年年报。
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
management = management.rename(columns={'operating_revenue': 'OptIncTTM'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def GrossMarginTTM(tp_derivation, factor_derivation, dependencies=['total_operating_revenue',
'total_operating_cost']):
"""
:name: 毛利(TTM) 营业毛利润
:desc: 根据截止指定日已披露的最新报告期“毛利”计算:(1)最新报告期是年报。则TTM=年报;(2)最新报告期不是年报,Q则TTM=本期+(上年年报-上年同期合并数),如果上年年报非空,本期、上年同期台并数存在空值,则返回上年年报。
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management = management.fillna(0)
if len(management) <= 0:
return None
func = lambda x: (x[0] - x[1]) / x[1] if x[1] != 0 and x[1] is not None else None
management['GrossMarginTTM'] = management[dependencies].apply(func, axis=1)
management = management[['GrossMarginTTM']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def SalesExpensesTTM(tp_derivation, factor_derivation, dependencies=['sale_expense']):
"""
:name: 销售费用(TTM)
:desc: 根据截止指定日已披露的最新报告期“销售费用”计算:(1)最新报告期是年报。则TTM=年报;(2)最新报告期不是年报,Q则TTM=本期+(上年年报-上年同期合并数),如果上年年报非空,本期、上年同期台并数存在空值,则返回上年年报。
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
        management = management.rename(columns={'sale_expense': 'SalesExpensesTTM'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def AdmFeeTTM(tp_derivation, factor_derivation, dependencies=['administration_expense']):
"""
:name: 管理费用(TTM)
:desc: 根据截止指定日已披露的最新报告期“管理费用”计算:(1)最新报告期是年报。则TTM=年报;(2)最新报告期不是年报,Q则TTM=本期+(上年年报-上年同期合并数),如果上年年报非空,本期、上年同期台并数存在空值,则返回上年年报。
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
management = management.rename(columns={'administration_expense': 'AdmFeeTTM'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def FinFeeTTM(tp_derivation, factor_derivation, dependencies=['financial_expense']):
"""
:name: 财务费用(TTM)
:desc: 根据截止指定日已披露的最新报告期“财务费用”计算:(1)最新报告期是年报。则TTM=年报;(2)最新报告期不是年报,Q则TTM=本期+(上年年报-上年同期合并数),如果上年年报非空,本期、上年同期台并数存在空值,则返回上年年报。
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
management = management.rename(columns={'financial_expense': 'FinFeeTTM'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def PerFeeTTM(tp_derivation, factor_derivation, dependencies=['sale_expense',
'administration_expense',
'financial_expense',
]):
"""
:name: 期间费用(TTM)
:desc: 根据截止指定日已披露的最新报告期“期间费用”计算:(1)最新报告期是年报。则TTM=年报;(2)最新报告期不是年报,Q则TTM=本期+(上年年报-上年同期合并数),如果上年年报非空,本期、上年同期台并数存在空值,则返回上年年报。
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management = management.fillna(0)
if len(management) <= 0:
return None
func = lambda x: x[0] + x[1] + x[2] if x[0] is not None and x[1] is not None and x[2] is not None else None
management['PerFeeTTM'] = management[dependencies].apply(func, axis=1)
management = management[['PerFeeTTM']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def InterestExpTTM(tp_derivation, factor_derivation, dependencies=['interest_expense']):
"""
:name: 利息支出(TTM)
:desc: 根据截止指定日已披露的最新报告期“利息支出”计算:(1)最新报告期是年报。则TTM=年报;(2)最新报告期不是年报,Q则TTM=本期+(上年年报-上年同期合并数),如果上年年报非空,本期、上年同期台并数存在空值,则返回上年年报。
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
management = management.rename(columns={'interest_expense': 'InterestExpTTM'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def MinorInterestTTM(tp_derivation, factor_derivation, dependencies=['minority_profit']):
"""
:name: 少数股东损益(TTM)
:desc: 根据截止指定日已披露的最新报告期“少数股东损益”计算:(1)最新报告期是年报。则TTM=年报;(2)最新报告期不是年报,Q则TTM=本期+(上年年报-上年同期合并数),如果上年年报非空,本期、上年同期台并数存在空值,则返回上年年报。
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
management = management.rename(columns={'minority_profit': 'MinorInterestTTM'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def AssetImpLossTTM(tp_derivation, factor_derivation, dependencies=['asset_impairment_loss']):
"""
:name: 资产减值损失(TTM)
:desc: 根据截止指定日已披露的最新报告期“资产减值损失”计算:(1)最新报告期是年报。则TTM=年报;(2)最新报告期不是年报,Q则TTM=本期+(上年年报-上年同期合并数),如果上年年报非空,本期、上年同期台并数存在空值,则返回上年年报。
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
management = management.rename(columns={'asset_impairment_loss': 'AssetImpLossTTM'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def NetIncFromOptActTTM(tp_derivation, factor_derivation, dependencies=['total_operating_revenue',
'total_operating_cost']):
"""
:name: 经营活动净收益(TTM)
:desc: 根据截止指定日已披露的最新报告期“经营活动净收益”计算:(1)最新报告期是年报。则TTM=年报;(2)最新报告期不是年报,Q则TTM=本期+(上年年报-上年同期合并数),如果上年年报非空,本期、上年同期台并数存在空值,则返回上年年报。
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management = management.fillna(0)
if len(management) <= 0:
return None
func = lambda x: x[0] - x[1] if x[0] is not None and x[1] is not None else None
management['NetIncFromOptActTTM'] = management[dependencies].apply(func, axis=1)
management = management[['NetIncFromOptActTTM']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def NetIncFromValueChgTTM(tp_derivation, factor_derivation, dependencies=['fair_value_variable_income',
'investment_income',
'exchange_income',
]):
"""
:name: 价值变动净收益(TTM)
:desc: 公允价值变动净收益+投资净收益+汇兑净收益
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management = management.fillna(0)
if len(management) <= 0:
return None
func = lambda x: x[0] + x[1] + x[2] if x[0] is not None and x[1] is not None and x[2] is not None else None
management['NetIncFromValueChgTTM'] = management[dependencies].apply(func, axis=1)
management = management[['NetIncFromValueChgTTM']]
factor_derivation = | pd.merge(factor_derivation, management, how='outer', on="security_code") | pandas.merge |
""" miscellaneous sorting / groupby utilities """
from collections import defaultdict
from typing import (
TYPE_CHECKING,
Callable,
DefaultDict,
Dict,
Iterable,
List,
Optional,
Tuple,
Union,
)
import numpy as np
from pandas._libs import algos, hashtable, lib
from pandas._libs.hashtable import unique_label_indices
from pandas._typing import IndexKeyFunc
from pandas.core.dtypes.common import (
ensure_int64,
ensure_platform_int,
is_extension_array_dtype,
)
from pandas.core.dtypes.generic import ABCMultiIndex
from pandas.core.dtypes.missing import isna
import pandas.core.algorithms as algorithms
from pandas.core.construction import extract_array
if TYPE_CHECKING:
from pandas import MultiIndex
from pandas.core.indexes.base import Index
_INT64_MAX = np.iinfo(np.int64).max
def get_indexer_indexer(
target: "Index",
level: Union[str, int, List[str], List[int]],
ascending: bool,
kind: str,
na_position: str,
sort_remaining: bool,
key: IndexKeyFunc,
) -> Optional[np.array]:
"""
Helper method that return the indexer according to input parameters for
the sort_index method of DataFrame and Series.
Parameters
----------
target : Index
level : int or level name or list of ints or list of level names
ascending : bool or list of bools, default True
kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'
na_position : {'first', 'last'}, default 'last'
sort_remaining : bool, default True
key : callable, optional
Returns
-------
Optional[ndarray]
The indexer for the new index.
"""
target = ensure_key_mapped(target, key, levels=level)
target = target._sort_levels_monotonic()
if level is not None:
_, indexer = target.sortlevel(
level, ascending=ascending, sort_remaining=sort_remaining
)
elif isinstance(target, ABCMultiIndex):
indexer = lexsort_indexer(
target._get_codes_for_sorting(), orders=ascending, na_position=na_position
)
else:
# Check monotonic-ness before sort an index (GH 11080)
if (ascending and target.is_monotonic_increasing) or (
not ascending and target.is_monotonic_decreasing
):
return None
indexer = nargsort(
target, kind=kind, ascending=ascending, na_position=na_position
)
return indexer
def get_group_index(labels, shape, sort: bool, xnull: bool):
"""
For the particular label_list, gets the offsets into the hypothetical list
representing the totally ordered cartesian product of all possible label
combinations, *as long as* this space fits within int64 bounds;
otherwise, though group indices identify unique combinations of
labels, they cannot be deconstructed.
- If `sort`, rank of returned ids preserve lexical ranks of labels.
i.e. returned id's can be used to do lexical sort on labels;
- If `xnull` nulls (-1 labels) are passed through.
Parameters
----------
labels : sequence of arrays
Integers identifying levels at each location
shape : sequence of ints
Number of unique levels at each location
sort : bool
If the ranks of returned ids should match lexical ranks of labels
xnull : bool
If true nulls are excluded. i.e. -1 values in the labels are
passed through.
Returns
-------
An array of type int64 where two elements are equal if their corresponding
labels are equal at all location.
Notes
-----
The length of `labels` and `shape` must be identical.
"""
def _int64_cut_off(shape) -> int:
acc = 1
for i, mul in enumerate(shape):
acc *= int(mul)
if not acc < _INT64_MAX:
return i
return len(shape)
def maybe_lift(lab, size):
# promote nan values (assigned -1 label in lab array)
# so that all output values are non-negative
return (lab + 1, size + 1) if (lab == -1).any() else (lab, size)
labels = map(ensure_int64, labels)
if not xnull:
labels, shape = map(list, zip(*map(maybe_lift, labels, shape)))
labels = list(labels)
shape = list(shape)
# Iteratively process all the labels in chunks sized so less
# than _INT64_MAX unique int ids will be required for each chunk
while True:
# how many levels can be done without overflow:
nlev = _int64_cut_off(shape)
# compute flat ids for the first `nlev` levels
stride = np.prod(shape[1:nlev], dtype="i8")
out = stride * labels[0].astype("i8", subok=False, copy=False)
for i in range(1, nlev):
if shape[i] == 0:
stride = 0
else:
stride //= shape[i]
out += labels[i] * stride
if xnull: # exclude nulls
mask = labels[0] == -1
for lab in labels[1:nlev]:
mask |= lab == -1
out[mask] = -1
if nlev == len(shape): # all levels done!
break
# compress what has been done so far in order to avoid overflow
# to retain lexical ranks, obs_ids should be sorted
comp_ids, obs_ids = compress_group_index(out, sort=sort)
labels = [comp_ids] + labels[nlev:]
shape = [len(obs_ids)] + shape[nlev:]
return out
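# Worked example (editorial comment, not part of pandas): with labels = [np.array([0, 1, 1]),
# np.array([1, 0, 1])] and shape = (2, 2), the stride of the first level is 2, so the flat
# group ids are labels[0] * 2 + labels[1] -> array([1, 2, 3]).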
def get_compressed_ids(labels, sizes):
"""
Group_index is offsets into cartesian product of all possible labels. This
space can be huge, so this function compresses it, by computing offsets
(comp_ids) into the list of unique labels (obs_group_ids).
Parameters
----------
labels : list of label arrays
sizes : list of size of the levels
Returns
-------
tuple of (comp_ids, obs_group_ids)
"""
ids = get_group_index(labels, sizes, sort=True, xnull=False)
return compress_group_index(ids, sort=True)
def is_int64_overflow_possible(shape) -> bool:
the_prod = 1
for x in shape:
the_prod *= int(x)
return the_prod >= _INT64_MAX
def decons_group_index(comp_labels, shape):
# reconstruct labels
if is_int64_overflow_possible(shape):
# at some point group indices are factorized,
# and may not be deconstructed here! wrong path!
raise ValueError("cannot deconstruct factorized group indices!")
label_list = []
factor = 1
y = 0
x = comp_labels
for i in reversed(range(len(shape))):
labels = (x - y) % (factor * shape[i]) // factor
np.putmask(labels, comp_labels < 0, -1)
label_list.append(labels)
y = labels * factor
factor *= shape[i]
return label_list[::-1]
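# Worked example (editorial comment, not part of pandas): reversing the example above,
# decons_group_index(np.array([1, 2, 3]), (2, 2)) recovers the original label arrays
# [array([0, 1, 1]), array([1, 0, 1])].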
def decons_obs_group_ids(comp_ids, obs_ids, shape, labels, xnull: bool):
"""
Reconstruct labels from observed group ids.
Parameters
----------
xnull : bool
If nulls are excluded; i.e. -1 labels are passed through.
"""
if not xnull:
lift = np.fromiter(((a == -1).any() for a in labels), dtype="i8")
shape = np.asarray(shape, dtype="i8") + lift
if not is_int64_overflow_possible(shape):
# obs ids are deconstructable! take the fast route!
out = decons_group_index(obs_ids, shape)
return out if xnull or not lift.any() else [x - y for x, y in zip(out, lift)]
i = unique_label_indices(comp_ids)
i8copy = lambda a: a.astype("i8", subok=False, copy=True)
return [i8copy(lab[i]) for lab in labels]
def indexer_from_factorized(labels, shape, compress: bool = True):
ids = get_group_index(labels, shape, sort=True, xnull=False)
if not compress:
ngroups = (ids.size and ids.max()) + 1
else:
ids, obs = compress_group_index(ids, sort=True)
ngroups = len(obs)
return get_group_index_sorter(ids, ngroups)
def lexsort_indexer(
keys, orders=None, na_position: str = "last", key: Optional[Callable] = None
):
"""
Performs lexical sorting on a set of keys
Parameters
----------
keys : sequence of arrays
Sequence of ndarrays to be sorted by the indexer
orders : boolean or list of booleans, optional
Determines the sorting order for each element in keys. If a list,
it must be the same length as keys. This determines whether the
corresponding element in keys should be sorted in ascending
(True) or descending (False) order. if bool, applied to all
elements as above. if None, defaults to True.
na_position : {'first', 'last'}, default 'last'
Determines placement of NA elements in the sorted list ("last" or "first")
key : Callable, optional
Callable key function applied to every element in keys before sorting
.. versionadded:: 1.0.0
"""
from pandas.core.arrays import Categorical
labels = []
shape = []
if isinstance(orders, bool):
orders = [orders] * len(keys)
elif orders is None:
orders = [True] * len(keys)
keys = [ensure_key_mapped(k, key) for k in keys]
for k, order in zip(keys, orders):
cat = | Categorical(k, ordered=True) | pandas.core.arrays.Categorical |
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import division
import sys
import glob
import pandas as pd
import numpy as np
from ngskit.utils import dna
#form dna_util import *
from common import *
# Pipelines
def lentivirus_combine(data_path = '/home/ccorbi/Work/Beagle/optim_lib/Kim/demultpx_barcodeOnly/Sequences/',
gaps='fill', fill_value=0, time_points=[0,3,7,14],
cell_times = {'HEK293T':[0,1.8,3.9,9.4],'RWP1':[0,2.0,4.1,9.7]}):
"""Read data from the blast ouput files. Apply DESEQ normalization.
Merge Replicas and average the reads, calc dropout score.
One library at a time.
:param str data_path: path with the where the files are located
:param str gaps: Behaivor with the gaps, by default fill, fill the gaps with one read.
:param func normfunction: Function to normalize the reads, by default log10
:param list time_points: list contain the time points info [0,1,4,10...]
:param dict cell_times: Dictionary where key is the cell lines name and
the values a list with the Double time of the cell line for each time point. [0,2.4,4.5,10]
This must match the time points.
:param bool normed: Divison all the time points by the biggest value in the time serie
:param bool itemization: return slope for each time point and the dropscore
:param int scalar: scalar to multiply dropscore [increse the signal]
:returns:
:rtype dict: The information for each lib and cell lines is merge toghether in a
dataframe. Each DataFrame is placed in a dictionary with key [lib_cellline]
"""
alldata = dict()
# Check if the number of times match the doubling times
for times in cell_times.values():
assert len(times) == len(time_points)
for cellline in cell_times.keys():
print('Reading {}'.format(cellline))
#get reads for all replicas in time point
# init vars
# DataFrame
poll_of_replicas = pd.DataFrame(columns=['referenceId'])
# recollect columns with reads info, for normalize
columns_reads = list()
# for group replicas (average)
columns_replicas = dict()
for t in time_points:
# init dict
timereplicas = glob.glob(data_path+'*%s*T%i*.out' % (cellline,t))
columns_replicas[t] = list()
#Get reads for replica, normal and put it toghet
# Read all replicas for a T point
for replica in timereplicas:
print('Parsing Time {} - replica {}'.format(t, replica))
# Read data for replica
# No simple norm
raw_data = read_bowtie(replica, norm=False)
# Get name replica and label it
name_replica = replica.split('/')[-1]
columns_reads.append('Reads_{}'.format(name_replica))
columns_replicas[t].append('nReads_{}'.format(name_replica))
raw_data.rename(columns={'Reads':'Reads_{}'.format(name_replica)}, inplace=True)
# 'nReads':'nReads_{}'.format(name_replica)}, inplace=True)
# Join replicas
poll_of_replicas = | pd.merge(poll_of_replicas, raw_data, on=['referenceId'], how='outer') | pandas.merge |
######################################################################
## DeepBiome
## - Reader
##
## July 10. 2019
## Youngwon (<EMAIL>)
##
## Reference
## - Keras (https://github.com/keras-team/keras)
######################################################################
import os
import sys
import json
import timeit
import copy
import numpy as np
import pandas as pd
from sklearn.model_selection import KFold, train_test_split
# from sklearn.preprocessing import scale, minmax_scale, robust_scale
from sklearn.preprocessing import MinMaxScaler
from keras.utils import to_categorical
# from keras.utils import to_categorical
from . import logging_daily
########################################################################################################
# Base reader
########################################################################################################
class BaseReader(object):
"""Inherit from this class when implementing new readers."""
def __init__(self, log, verbose=True):
# def __init__(self, log, path_info, network_info, verbose=True):
self.log = log
self.verbose = verbose
def read_dataset(self, data_path):
raise NotImplementedError()
def get_dataset(self, train_index):
return NotImplementedError()
def get_training_validation_index(self, idx, validation_size=0.2):
return train_test_split(idx, test_size = validation_size)
########################################################################################################
### MicroBiome Reader
########################################################################################################
class MicroBiomeReader(BaseReader):
def __init__(self, log, path_info, verbose=True):
super(MicroBiomeReader,self).__init__(log, verbose)
self.path_info = path_info
# self.network_info = network_info
def read_dataset(self, x_path, y_path, sim): # TODO fix without sim...
if self.verbose:
self.log.info('-----------------------------------------------------------------------')
self.log.info('Construct Dataset')
self.log.info('-----------------------------------------------------------------------')
self.log.info('Load data')
x = pd.read_csv(x_path)
        if y_path is not None: y = pd.read_csv(y_path)
# Normalize X with min-max normalization
mat = np.matrix(x)
prepro = MinMaxScaler()
prepro.fit(mat)
self.x_label = np.array(x.columns)
x = pd.DataFrame(prepro.transform(mat), columns = list(x.columns))
self.x = np.array(x, dtype=np.float32)
        if y_path is not None:
try:
y = pd.DataFrame(y.iloc[:, sim]) #.merge(pd.DataFrame(1-y.iloc[:, sim]), left_index = True, right_index = True)
except:
y = pd.DataFrame(y) #.merge(pd.DataFrame(1-y.iloc[:, sim]), left_index = True, right_index = True)
self.num_classes, self.y = self._set_problem(y)
# Covarates
try: self.continuous_variabl_paths = [cov.strip() for cov in self.path_info['covariates_info']['continuous_variables'].split(',') if '.csv' in cov]
except: self.continuous_variabl_paths = []
try: self.categorical_variable_paths = [cov.strip() for cov in self.path_info['covariates_info']['categorical_variables'].split(',') if '.csv' in cov]
except: self.categorical_variable_paths = []
if len(self.continuous_variabl_paths) > 0:
self.cov_conti = pd.DataFrame()
for i in range(len(self.continuous_variabl_paths)):
cov = pd.read_csv(self.continuous_variabl_paths[i])
cov.columns = [name.title() for name in cov.columns]
self.cov_conti = self.cov_conti.join(cov, how='right')
self.cov_conti = self.cov_conti.astype(np.float32)
if len(self.categorical_variable_paths) > 0:
self.cov_categorical = pd.DataFrame()
for i in range(len(self.categorical_variable_paths)):
cov = pd.read_csv(self.categorical_variable_paths[i])
cov_name = cov.columns[0]
cov = to_categorical(cov, dtype='int32')
cov = | pd.DataFrame(cov) | pandas.DataFrame |
import numpy as np
import pandas as pd
def get_processed_data(sample):
# loading
raw_tb = pd.read_csv('data/fifa.csv')
raw_tb = raw_tb[:sample]
selected_columns = ['Age','Wage','Crossing', 'Finishing', 'BallControl','Curve','LongPassing', 'Agility','ShotPower','Stamina','LongShots','Aggression','Positioning', 'Marking']
#Crossing,Finishing,HeadingAccuracy,ShortPassing,Volleys,Dribbling,Curve,FKAccuracy,LongPassing,BallControl,Acceleration,SprintSpeed,Agility,Reactions,Balance,ShotPower,Jumping,Stamina,Strength,LongShots,Aggression,Interceptions,Positioning,Vision,Penalties,Composure,Marking,StandingTackle,SlidingTackle,GKDiving,GKHandling,GKKicking,GKPositioning,GKReflexes
    _tb = raw_tb[selected_columns].copy()
_tb.loc[:, 'Wage'] = | pd.to_numeric(_tb.loc[:, 'Wage'].str[3:-1]) | pandas.to_numeric |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
dataset = | pd.read_csv('mushrooms.csv') | pandas.read_csv |
import numpy as np
import pandas as pd
from utils.random import scaled_inverse_chi_squared
class ThompsonSamplingGaussianSicqPrior(object):
def __init__(self, N, save_log=False):
self.N = N
self.ks = np.ones(N)
self.mus = np.zeros(N)
self.vs = np.ones(N)
self.sigmas = np.ones(N)
self.save_log = save_log
self.variances = []
self.thetas = []
def initialize(self):
self.ks = np.ones(self.N)
self.mus = np.zeros(self.N)
self.vs = np.ones(self.N)
self.sigmas = np.ones(self.N)
self.variances = []
self.thetas = []
def select_arm(self):
return np.argmax(self.estimate_mean())
def estimate_mean(self):
# For each arm i=1,...,N, sample random value from sicq distribution
variance = [scaled_inverse_chi_squared(self.vs[i], self.sigmas[i])
for i in range(self.N)]
# For each arm i=1,...,N, sample random value from normal distribution
theta = [np.random.normal(self.mus[i],
np.math.sqrt(variance[i] / self.ks[i]))
for i in range(self.N)]
if self.save_log:
self.thetas.append(theta)
self.variances.append(variance)
return theta
def update_param(self, arm_id, reward):
sigma = self.sigmas[arm_id]
v = self.vs[arm_id]
mu = self.mus[arm_id]
k = self.ks[arm_id]
d = reward - mu
# update parameter of sicq distribution
self.sigmas[arm_id] = sigma * v / (v + 1.0) + d * d * k / (
(v + 1.0) * (k + 1.0))
self.vs[arm_id] = v + 1.0
# update parameter of normal distribution
self.mus[arm_id] = (mu * k + reward) / (k + 1.0)
self.ks[arm_id] = k + 1.0
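    # Illustrative bandit loop (not part of the original class; `pull_arm` is a hypothetical
    # environment function returning a reward for the chosen arm):
    #   agent = ThompsonSamplingGaussianSicqPrior(N=3)
    #   agent.initialize()
    #   for _ in range(1000):
    #       arm = agent.select_arm()
    #       agent.update_param(arm, pull_arm(arm))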
def save(self, folder_name):
if self.save_log:
self.thetas = | pd.DataFrame(self.thetas) | pandas.DataFrame |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file contains dummy data for the model unit tests
import numpy as np
import pandas as pd
AIR_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 380.6292037661305,
1: 383.26004701147235,
2: 385.8905370924373,
3: 388.52067431512216,
4: 391.1504589893095,
5: 393.7798914284503,
6: 396.4089719496461,
7: 399.0377008736321,
8: 401.66607852475926,
9: 404.2941052309762,
10: 406.9217813238114,
11: 409.54910713835505,
12: 412.1760830132403,
13: 414.80270929062544,
14: 417.42898631617453,
15: 420.0549144390392,
16: 422.68049401183924,
17: 425.3057253906438,
18: 427.93060893495215,
19: 430.555145007674,
20: 433.1793339751107,
21: 435.8031762069345,
22: 438.42667207616984,
23: 441.0498219591729,
24: 443.6726262356114,
25: 446.2950852884452,
26: 448.91719950390507,
27: 451.53896927147304,
28: 454.1603949838614,
29: 456.78147703699216,
},
"fcst_upper": {
0: 565.2596851227581,
1: 567.9432096935082,
2: 570.6270874286351,
3: 573.3113180220422,
4: 575.9959011639468,
5: 578.680836540898,
6: 581.3661238357942,
7: 584.0517627279,
8: 586.7377528928648,
9: 589.4240940027398,
10: 592.1107857259966,
11: 594.797827727545,
12: 597.4852196687516,
13: 600.1729612074585,
14: 602.8610519980012,
15: 605.5494916912286,
16: 608.2382799345206,
17: 610.9274163718079,
18: 613.6169006435915,
19: 616.3067323869615,
20: 618.9969112356168,
21: 621.6874368198849,
22: 624.3783087667415,
23: 627.0695266998305,
24: 629.7610902394838,
25: 632.4529990027421,
26: 635.145252603374,
27: 637.8378506518982,
28: 640.5307927556019,
29: 643.2240785185628,
},
}
)
AIR_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: | pd.Timestamp("1961-03-01 00:00:00") | pandas.Timestamp |
import pandas as pd
import requests
from bs4 import BeautifulSoup
import time
import random
# create the progress bar function for use later during scraping (credit to github.com/marzukr)
def progbar(curr, total, full_progbar):
frac = curr/total
filled_progbar = round(frac*full_progbar)
print('\r', '#'*filled_progbar + '-'*(full_progbar-filled_progbar), '[{:>7.2%}]'.format(frac), end='')
# randomly chooses an agent from a text file and returns it in the headers variable to avoid triggering spam detection
def agent(txt):
agentFile = open(txt, 'r')
headersList = agentFile.readlines()
randomAgent = headersList[random.randint(1,1000)]
randomAgent = (randomAgent[0:len(randomAgent)-2])
headers = {'user-agent': randomAgent}
return headers
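# Editorial note: agent() assumes 'agentList.txt' contains at least 1001 lines with one
# user-agent string per line; random.randint(1, 1000) picks one of them and the trailing
# slice strips the line ending before it is wrapped in the {'user-agent': ...} dict.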
# creates a blank list of records for use later when creating the data frame; request headers are generated per call via agent()
records = []
# checks to see if we can even access the page
page = ''
while page == '':
try:
page = requests.get("http://services.runescape.com/m=itemdb_oldschool/viewitem?obj=13190", headers = agent('agentList.txt'))
break
except:
print("Connection refused by server..")
print("Retrying in 5 seconds..")
time.sleep(5)
continue
# sets an item finds its value, recurses for every item
item = 0
while item <= 21853:
time.sleep(random.randint(0,3))
item = str(item)
# finds price by entering the ID into the URL and scraping from the corresponding page
r_price = requests.get("http://services.runescape.com/m=itemdb_oldschool/viewitem?obj="+item, headers = agent('agentList.txt'))
priceSoup = BeautifulSoup(r_price.text, 'html.parser')
# checks for a specific statement to see if an item can even have a price value
checkForItem = priceSoup.find_all('h3')
if checkForItem[0].text != "Sorry, there was a problem with your request.":
# this url will be used to find the item name by using its ID
r_name = requests.get("https://www.runelocus.com/item-details/?item_id="+item, headers = headers)
nameSoup = BeautifulSoup(r_name.text, 'html.parser')
nameResults = nameSoup.find_all('h2')
# modifies nameList results to only show the name
nameList = nameResults[0].text
name = nameList[19:(len(nameList)-1)]
# locates the span title containing the price and strips the span + assigns to variable
priceResults = priceSoup.find_all('span')
price = priceResults[4].text
else:
price = "NULL"
if price != "NULL":
# limits to 10k http requests before sleeping for 1 hour (sleep time may need to be altered depending on Jagex max request permissions
if int(item)/10000 != 1 or int(item)/10000 != 2 or int(item)/10000 != 3:
date = pd.datetime.now().date()
date = str(date) # conversion to string needed to avoid list containing 'datetime.date(xxxx, x, x)' for date value
records.append((date, name, price.strip())) # appends to record for use in creating pd data frame
# converts item to an int for progress bar then back into a str
item = int(item)
progbar(item, 21853, 20)
item = str(item)
else:
            time.sleep(3600)
item = int(item)
item += 1
# create data frame with all items from records list
df = | pd.DataFrame(records, columns=['date', 'item', 'price']) | pandas.DataFrame |
from __future__ import absolute_import
import collections
import gzip
import logging
import os
import sys
import multiprocessing
import threading
import numpy as np
import pandas as pd
from itertools import cycle, islice
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import StandardScaler, MinMaxScaler, MaxAbsScaler
file_path = os.path.dirname(os.path.realpath(__file__))
lib_path = os.path.abspath(os.path.join(file_path, '..', '..', 'common'))
sys.path.append(lib_path)
from data_utils import get_file
logger = logging.getLogger(__name__)
SEED = 2017
np.set_printoptions(threshold=np.nan)
np.random.seed(SEED)
def get_p1_file(link):
fname = os.path.basename(link)
return get_file(fname, origin=link, cache_subdir='Pilot1')
def scale(df, scaling=None):
"""Scale data included in pandas dataframe.
Parameters
----------
df : pandas dataframe
dataframe to scale
    scaling : 'maxabs', 'minmax', 'std', or None, optional (default None)
type of scaling to apply
"""
if scaling is None or scaling.lower() == 'none':
return df
df = df.dropna(axis=1, how='any')
# Scaling data
if scaling == 'maxabs':
# Normalizing -1 to 1
scaler = MaxAbsScaler()
elif scaling == 'minmax':
# Scaling to [0,1]
scaler = MinMaxScaler()
else:
# Standard normalization
scaler = StandardScaler()
mat = df.as_matrix()
mat = scaler.fit_transform(mat)
df = | pd.DataFrame(mat, columns=df.columns) | pandas.DataFrame |
"""Top-level API, including the main LinkedDataFrame class"""
import attr
from collections import deque
from deprecated import deprecated
import numexpr as ne
import numpy as np
import pandas as pd
from pandas import DataFrame, Series, Index, MultiIndex
from typing import Any, Dict, Deque, Hashable, List, Optional, Set, Tuple, Type, Union
from .constants import LinkageSpecificationError, LinkAggregationRequired
from .missing_data import SeriesFillManager, infer_dtype, PandasDtype
from ..parsing.constants import NAN_STR, NAN_VAL, NEG_INF_STR, NEG_INF_VAL
from ..parsing.expressions import Expression
from ..parsing.expr_items import EvaluationMode
from ..utils import convert_series, to_numpy
_LabelType = Union[str, List[str]]
_NUMERIC_AGGREGATIONS = {'max', 'min', 'mean', 'median', 'prod', 'std', 'sum', 'var', 'quantile'}
_NON_NUMERIC_AGGREGATIONS = {'count', 'first', 'last', 'nth'}
_SUPPORTED_AGGREGATIONS = sorted(_NUMERIC_AGGREGATIONS | _NON_NUMERIC_AGGREGATIONS)
_NUMERIC_TYPES = {PandasDtype.INT_NAME, PandasDtype.UINT_NAME, PandasDtype.FLOAT_NAME, PandasDtype.BOOL_NAME}
class _IndexMeta:
labels: List[str] = attr.ib(converter=lambda x: [x] if isinstance(x, str) else list(x))
from_row_labels: bool = attr.ib()
def __init__(self, labels: Union[str, List[str]] = None, from_row_labels: bool = True):
if isinstance(labels, str):
labels = [labels]
elif labels is None:
from_row_labels = True
self.labels = labels
self.from_row_labels = from_row_labels
def __str__(self):
if self.from_row_labels:
return f'From index: {self.labels}'
return f'From columns: {self.labels}'
def get_indexer(self, frame: DataFrame) -> Union[Index, MultiIndex]:
if self.labels is None:
return frame.index
arrays = []
if self.from_row_labels:
if len(self.labels) > frame.index.nlevels:
raise LinkageSpecificationError('Cannot specify more levels than in the index')
for label in self.labels:
# `get_level_values` works on both Index and MultiIndex objects and accepts both
# integer levels AND level names
try:
arrays.append(frame.index.get_level_values(label))
except KeyError:
raise LinkageSpecificationError(f'Level `{label}` not in the index')
else:
for label in self.labels:
if label not in frame:
raise LinkageSpecificationError(f'Column `{label}` not in the columns')
arr = to_numpy(frame[label])
arrays.append(arr)
if len(arrays) == 1:
name = self.labels[0]
return Index(arrays[0], name=name)
return MultiIndex.from_arrays(arrays, names=self.labels)
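    # Illustrative example (editorial comment; column and level names are hypothetical):
    #   _IndexMeta('household_id', from_row_labels=False).get_indexer(persons_df)
    # builds an Index from the `household_id` column, while
    #   _IndexMeta(['taz', 'zone'], from_row_labels=True).get_indexer(persons_df)
    # pulls the named levels out of the frame's row index as a MultiIndex.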
def nlevels(self, frame: DataFrame) -> int:
if self.labels is None:
return frame.index.nlevels
return len(self.labels)
def validate(self, frame: DataFrame):
if self.labels is None:
return # Use the index, which is always available
        frame_items = set(frame.index.names) if self.from_row_labels else set(frame.columns)
item_name = 'index' if self.from_row_labels else 'columns'
for name in self.labels:
assert name in frame_items, f'Could not find `{name}` in the {item_name}'
class _LinkMeta:
owner: 'LinkedDataFrame'
other: Union['LinkedDataFrame', DataFrame]
_other_has_links: bool
aggregation_required: bool
self_meta: _IndexMeta
other_meta: _IndexMeta
flat_indexer: Optional[np.ndarray]
other_grouper: Optional[np.ndarray]
missing_indices: Optional[Union[np.ndarray, List]]
@staticmethod
def create(owner: 'LinkedDataFrame', other: Union['LinkedDataFrame', DataFrame], self_labels: Union[List[str], str],
self_from_row_labels: bool, other_labels: Union[List[str], str], other_from_row_labels: bool,
precompute: bool = True) -> '_LinkMeta':
self_meta = _IndexMeta(self_labels, self_from_row_labels)
other_meta = _IndexMeta(other_labels, other_from_row_labels)
assert self_meta.nlevels(owner) == other_meta.nlevels(other)
other_has_links = isinstance(other, LinkedDataFrame)
link = _LinkMeta(owner, other, self_meta, other_meta, other_has_links)
link._determine_aggregation(precompute)
return link
def __init__(self, owner, other, self_meta, other_meta, other_has_links):
self.owner = owner
self.other = other
self.self_meta = self_meta
self.other_meta = other_meta
self._other_has_links = other_has_links
self.aggregation_required = False
self.flat_indexer = None
self.other_grouper = None
self.missing_indices = None
def _determine_aggregation(self, precompute):
self_indexer = self.self_meta.get_indexer(self.owner)
other_indexer = self.other_meta.get_indexer(self.other)
self_unique = self_indexer.is_unique
other_unique = other_indexer.is_unique
if self_unique and other_unique:
flag = False
elif self_unique: # Other is not unique
flag = True
elif other_unique:
flag = False
else:
raise RuntimeError('Many-to-many links are not permitted')
self.aggregation_required = flag
if precompute:
self._make_indexer(self_indexer, other_indexer)
def _get_indexers(self) -> Tuple[Index, Index]:
return self.self_meta.get_indexer(self.owner), self.other_meta.get_indexer(self.other)
def _make_indexer(self, self_indexer: Index, other_indexer: Index):
if self.aggregation_required:
group_ints, group_order = other_indexer.factorize()
self.other_grouper = group_ints
self.flat_indexer, self.missing_indices = group_order.get_indexer_non_unique(self_indexer)
else: # Performance-tuned fast paths for constructing indexers
if self_indexer.equals(other_indexer): # Indexers are identical
self.flat_indexer = np.arange(len(other_indexer))
self.missing_indices = np.array([], dtype=int)
elif len(self_indexer.difference(other_indexer)) == 0: # No missing values
# Taking the difference is faster than `all(.isin())`
self.missing_indices = np.array([], dtype=int)
self.flat_indexer = other_indexer.get_indexer(self_indexer)
else: # All other cases
self.flat_indexer, self.missing_indices = other_indexer.get_indexer_non_unique(self_indexer)
@property
def chained(self) -> bool:
return self._other_has_links
@property
def indexer_and_missing(self) -> Tuple[np.ndarray, np.ndarray]:
if (self.flat_indexer is None) or (self.missing_indices is None):
self.precompute()
return self.flat_indexer, self.missing_indices
def precompute(self):
"""Top-level method to precompute the indexer"""
self._make_indexer(*self._get_indexers())
def copy_meta(self) -> '_LinkMeta':
copied = _LinkMeta(self.owner, self.other, self.self_meta, self.other_meta, self._other_has_links)
copied.aggregation_required = self.aggregation_required
return copied
def copy(self, indices=None) -> '_LinkMeta':
copied = self.copy_meta()
if self.flat_indexer is not None:
copied.flat_indexer = self.flat_indexer[indices] if indices is not None else self.flat_indexer
if isinstance(self.missing_indices, list):
copied.missing_indices = []
elif isinstance(self.missing_indices, np.ndarray):
if (indices is not None) and (len(self.missing_indices) > 0):
mask = np.isin(self.missing_indices, indices)
copied.missing_indices = self.missing_indices[mask]
else:
copied.missing_indices = self.missing_indices[:]
if self.other_grouper is not None:
copied.other_grouper = self.other_grouper
return copied
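# Hedged illustration (toy indexes, not library data) of the pandas primitive used by
# _make_indexer above: Index.get_indexer_non_unique returns, for every label of the
# argument, its position in the calling Index (-1 when absent), together with the
# positions of the labels that could not be matched.
def _demo_get_indexer_non_unique():
    left = Index([1, 2, 4])
    right = Index([1, 2, 3])
    positions, missing = right.get_indexer_non_unique(left)
    return positions, missing  # expected: [0, 1, -1] and [2]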
class LinkedDataFrame(DataFrame):
_links: Dict[str, _LinkMeta]
_identified_links: Set[str]
_class_filler: SeriesFillManager = SeriesFillManager()
_instance_filler: SeriesFillManager
_column_fills: dict
# temporary properties
_internal_names = DataFrame._internal_names + ['_links', '_identified_links', '_instance_filler', '_column_fills']
_internal_names_set = set(_internal_names)
# normal properties
_metadata = []
# region Static Readers
@staticmethod
def read_csv(*args, **kwargs) -> 'LinkedDataFrame':
"""Wrapper for pd.read_csv() that returns a LinkedDataFrame instead"""
return LinkedDataFrame( | pd.read_csv(*args, **kwargs) | pandas.read_csv |
#! -*- coding:utf-8 -*-
import os
import re
import gc
import sys
import json
import codecs
import random
import warnings
import numpy as np
import pandas as pd
from tqdm import tqdm
from random import choice
import tensorflow as tf
import matplotlib.pyplot as plt
from collections import Counter
from sklearn.model_selection import KFold
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import f1_score, accuracy_score
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import train_test_split
import keras.backend as K
from keras.layers import *
from keras.callbacks import *
from keras.models import Model
from keras.optimizers import Adam
from keras.initializers import glorot_uniform
from keras_bert import load_trained_model_from_checkpoint, Tokenizer
tqdm.pandas()
seed = 2019
random.seed(seed)
tf.set_random_seed(seed)
np.random.seed(seed)
warnings.filterwarnings('ignore')
################################################################
data_path = '../../dataSet/'
train = pd.read_csv(data_path + 'Round2_train.csv', encoding='utf-8')
train2= pd.read_csv(data_path + 'Train_Data.csv', encoding='utf-8')
train=pd.concat([train, train2], axis=0, sort=True)
test = pd.read_csv(data_path + 'round2_test.csv', encoding='utf-8')
train = train[train['entity'].notnull()]
test = test[test['entity'].notnull()]
train=train.drop_duplicates(['title','text','entity','negative','key_entity']) # drop duplicated rows
print(train.shape) ###(10526, 6)
print(test.shape) #### (9997, 4)
def get_or_content(y,z):
s=''
if str(y)!='nan':
s+=y
if str(z)!='nan':
s+=z
return s
# build content = title + text
train['content']=list(map(lambda y,z: get_or_content(y,z),train['title'],train['text']))
test['content']=list(map(lambda y,z: get_or_content(y,z),test['title'],test['text']))
def entity_clear_row(entity,content):
entities = entity.split(';')
entities.sort(key=lambda x: len(x))
n = len(entities)
tmp = entities.copy()
for i in range(n):
entity_tmp = entities[i]
        # skip entities with length <= 1
if len(entity_tmp)<=1:
tmp.remove(entity_tmp)
continue
if i + 1 >= n:
break
for entity_tmp2 in entities[i + 1:]:
if entity_tmp2.find(entity_tmp) != -1 and (
entity_tmp2.find('?') != -1 or content.replace(entity_tmp2, '').find(entity_tmp) == -1):
tmp.remove(entity_tmp)
break
return ';'.join(tmp)
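# Hedged illustration with made-up strings (not from the competition data): a candidate
# that is only a substring of a longer entity, and never appears on its own once the
# longer mention is removed from the text, gets pruned.
def _demo_entity_clear_row():
    content = 'News about ABC Bank spread quickly on Friday.'
    return entity_clear_row('ABC;ABC Bank', content)  # expected: 'ABC Bank'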
train['entity']=list(map(lambda entity,content:entity_clear_row(entity,content),train['entity'],train['content']))
test['entity']=list(map(lambda entity,content:entity_clear_row(entity,content),test['entity'],test['content']))
test['text'] = test.apply(lambda index: index.title if index.text is np.nan else index.text, axis=1)
train = train[(train['entity'].notnull()) & (train['negative'] == 1)] ###
emotion = pd.read_csv('../../Emotion_Model/submit/emotion_voting_three_models.csv', encoding='utf-8')
emotion = emotion[emotion['negative'] == 1]
test = emotion.merge(test, on='id', how='left')
################################################################
train_id_entity = train[['id', 'entity']]
train_id_entity['entity'] = train_id_entity['entity'].apply(lambda index: index.split(';'))
id, entity = [], []
for index in range(len(train_id_entity['entity'])):
entity.extend(list(train_id_entity['entity'])[index])
id.extend([list(train_id_entity['id'])[index]] * len(list(train_id_entity['entity'])[index]))
train_id_entity = pd.DataFrame({'id': id, 'entity_label': entity})
test_id_entity = test[['id', 'entity']]
test_id_entity['entity'] = test_id_entity['entity'].apply(lambda index: index.split(';'))
id, entity = [], []
for index in range(len(test_id_entity['entity'])):
entity.extend(list(test_id_entity['entity'])[index])
id.extend([list(test_id_entity['id'])[index]] * len(list(test_id_entity['entity'])[index]))
test_id_entity = pd.DataFrame({'id': id, 'entity_label': entity})
train = train.merge(train_id_entity, on='id', how='left')
train['flag'] = train.apply(lambda index: 1 if index.key_entity.split(';').count(index.entity_label) >= 1 else 0, axis=1)
test = test.merge(test_id_entity, on='id', how='left')
################################################################
print(train.shape)
print(test.shape)
def extract_feature(data):
data['sub_word_num'] = data.apply(lambda index: index.entity.count(index.entity_label) - 1, axis=1)
data['question_mark_num'] = data['entity_label'].apply(lambda index: index.count('?'))
data['occur_in_title_num'] = data.apply(lambda index: 0 if index.title is np.nan else index.title.count(index.entity_label), axis=1)
data['occur_in_text_num'] = data.apply(lambda index: 0 if index.text is np.nan else index.text.count(index.entity_label), axis=1)
data['occur_in_partial_text_num'] = data.apply(lambda index: 0 if index.text is np.nan else index.text[:507].count(index.entity_label), axis=1)
data['occur_in_entity'] = data.apply(lambda index: 0 if index.text is np.nan else index.entity.count(index.entity_label) - 1, axis=1)
data['is_occur_in_article'] = data.apply(lambda index: 1 if (index.occur_in_title_num >= 1) | (index.occur_in_text_num >= 1) else 0, axis=1)
return data
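# Hedged illustration (a single made-up row): the handcrafted features count how often
# the candidate entity appears in the title, the full text, the first 507 characters of
# the text, and the entity list itself.
def _demo_extract_feature():
    row = pd.DataFrame({'title': ['ABC Bank probed'],
                        'text': ['ABC Bank is under investigation. ABC Bank denies it.'],
                        'entity': ['ABC Bank;XYZ Fund'],
                        'entity_label': ['ABC Bank']})
    return extract_feature(row)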
train = extract_feature(train)
test = extract_feature(test)
print(train.columns)
train['entity_len'] = train['entity_label'].progress_apply(lambda index: len(index))
test['entity_len'] = test['entity_label'].progress_apply(lambda index: len(index))
train[train['entity_len'] == 1].shape
train = train[train['entity_len'] > 1]
test[test['entity_len'] == 1].shape
test = test[test['entity_len'] > 1]
train_feature = train[['sub_word_num', 'question_mark_num', 'occur_in_title_num', 'occur_in_text_num', 'is_occur_in_article', 'occur_in_entity', 'occur_in_partial_text_num']]
test_feature = test[['sub_word_num', 'question_mark_num', 'occur_in_title_num', 'occur_in_text_num', 'is_occur_in_article', 'occur_in_entity', 'occur_in_partial_text_num']]
# Normalization
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
train_feature = scaler.fit_transform(train_feature)
test_feature = scaler.transform(test_feature)  # reuse the scaler fitted on the training features
def get_other_content(x,y):
entitys=x.split(";")
if len(entitys)<=1:
return np.nan
l=[]
for e in entitys:
if e!=y:
l.append(e)
return ';'.join(l)
train['other_entity']=list(map(lambda x,y :get_other_content(x,y),train['entity'],train['entity_label']))
test['other_entity']=list(map(lambda x,y :get_other_content(x,y),test['entity'],test['entity_label']))
def get_content(x,y):
if str(y)=='nan':
return x
y=y.split(";")
y = sorted(y, key=lambda i:len(i),reverse=True)
for i in y:
x = '其他实体'.join(x.split(i))
return x
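# Hedged illustration (invented sentence): y holds the concatenation of the *other*
# candidate entities, and every occurrence of them in the text is masked with the
# literal placeholder '其他实体' ("other entity"), leaving only the entity currently
# being classified visible to the model.
def _demo_get_content():
    text = 'A公司与B公司均被点名'
    return get_content(text, 'B公司')  # expected: 'A公司与其他实体均被点名'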
train['text']=list(map(lambda x,y: get_content(x,y), train['text'], train['other_entity']))
test['text']=list(map(lambda x,y: get_content(x,y), test['text'], test['other_entity']))
maxlen = 509  # maximum sequence length
# Model download link (TensorFlow version): https://github.com/ymcui/Chinese-BERT-wwm#%E4%B8%AD%E6%96%87%E6%A8%A1%E5%9E%8B%E4%B8%8B%E8%BD%BD
bert_path = '../../PreTrainModel/chinese_roberta_wwm_large_ext_L-24_H-1024_A-16/'
config_path = bert_path + 'bert_config.json'
checkpoint_path = bert_path + 'bert_model.ckpt'
dict_path = bert_path + 'vocab.txt'
token_dict = {}
with codecs.open(dict_path, 'r', 'utf8') as reader:
for line in reader:
token = line.strip()
        token_dict[token] = len(token_dict)  # assign each token a sequential id
class OurTokenizer(Tokenizer):
def _tokenize(self, text):
R = []
for c in text:
if c in self._token_dict:
R.append(c)
elif self._is_space(c):
                R.append('[unused1]')  # map spaces to the untrained [unused1] token
else:
                R.append('[UNK]')  # all remaining characters map to [UNK]
return R
tokenizer = OurTokenizer(token_dict)
def seq_padding(X, padding=0):
L = [len(x) for x in X]
ML = max(L)
return np.array([np.concatenate([x, [padding] * (ML - len(x))]) if len(x) < ML else x for x in X])
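# Minimal sketch (arbitrary token ids): seq_padding right-pads every sequence in a batch
# with `padding` up to the length of the longest one, returning a rectangular numpy
# array suitable for batched model input.
def _demo_seq_padding():
    batch = [[101, 7, 102], [101, 7, 8, 9, 102]]
    return seq_padding(batch, padding=0)  # expected shape: (2, 5)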
class data_generator:
def __init__(self, data, feature, batch_size=4, shuffle=True):
self.data = data
self.batch_size = batch_size
self.shuffle = shuffle
self.feature = feature
self.steps = len(self.data) // self.batch_size
if len(self.data) % self.batch_size != 0:
self.steps += 1
def __len__(self):
return self.steps
def __iter__(self):
while True:
idxs = list(range(len(self.data)))
if self.shuffle:
np.random.shuffle(idxs)
X1, X2, Y, Fea = [], [], [], []
for i in idxs:
d = self.data[i]
fea = self.feature[i] # add feature
first_text = d[0]
second_text = d[2][:maxlen - d[1]]
x1, x2 = tokenizer.encode(first=first_text, second=second_text) # , max_len=512
y = d[3]
Fea.append(fea)
X1.append(x1)
X2.append(x2)
Y.append([y])
if len(X1) == self.batch_size or i == idxs[-1]:
X1 = seq_padding(X1)
X2 = seq_padding(X2, padding=1)
Fea = seq_padding(Fea)
Y = seq_padding(Y)
yield [X1, X2, Fea], Y[:, 0, :]
[X1, X2, Y, Fea] = [], [], [], []
from keras.metrics import top_k_categorical_accuracy
from keras.metrics import categorical_accuracy
def acc_top2(y_true, y_pred):
return top_k_categorical_accuracy(y_true, y_pred, k=1)
def f1_metric(y_true, y_pred):
def recall(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
recall = true_positives / (possible_positives + K.epsilon())
return recall
def precision(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
return precision
precision = precision(y_true, y_pred)
recall = recall(y_true, y_pred)
return 2*((precision*recall)/(precision+recall+K.epsilon()))
def build_bert(nclass):
bert_model = load_trained_model_from_checkpoint(config_path, checkpoint_path, seq_len=None)
for l in bert_model.layers:
# print(l)
l.trainable = True
x1_in = Input(shape=(None,))
x2_in = Input(shape=(None,))
x3_in = Input(shape=(train_feature.shape[1],))
feature = Dense(64, activation='relu')(x3_in)
x = bert_model([x1_in, x2_in])
x = Lambda(lambda x: x[:, 0])(x)
x = concatenate([x, feature])
p = Dense(nclass, activation='softmax')(x)
model = Model([x1_in, x2_in, x3_in], p)
model.compile(loss='categorical_crossentropy',
optimizer=Adam(1e-5), # lr: 5e-5 3e-5 2e-5 epoch: 3, 4 batch_size: 16, 32
metrics=['accuracy', f1_metric]) # categorical_accuracy
print(model.summary())
return model
################################################################
from keras.utils import to_categorical
DATA_LIST = []
for data_row in train.iloc[:].itertuples():
DATA_LIST.append((data_row.entity_label, data_row.entity_len, data_row.text, to_categorical(data_row.flag, 2)))
DATA_LIST = np.array(DATA_LIST)
DATA_LIST_TEST = []
for data_row in test.iloc[:].itertuples():
DATA_LIST_TEST.append((data_row.entity_label, data_row.entity_len, data_row.text, to_categorical(0, 2)))
DATA_LIST_TEST = np.array(DATA_LIST_TEST)
################################################################
f1, acc = [], []
def run_cv(nfold, data, feature_train, data_label, data_test, feature_test):
kf = StratifiedKFold(n_splits=nfold, shuffle=True, random_state=seed).split(data, train['flag'])
train_model_pred = np.zeros((len(data), 2)) # 2
test_model_pred = np.zeros((len(data_test), 2)) # 2
for i, (train_fold, test_fold) in enumerate(kf):
X_train, X_valid, = data[train_fold, :], data[test_fold, :]
X_train_fea, X_valid_fea = feature_train[train_fold, :], feature_train[test_fold, :]
model = build_bert(2) # 2
early_stopping = EarlyStopping(monitor='val_acc', patience=2) # val_acc
plateau = ReduceLROnPlateau(monitor="val_acc", verbose=1, mode='max', factor=0.5, patience=1)
checkpoint = ModelCheckpoint('./model/' + str(i) + '.hdf5', monitor='val_acc',
verbose=2, save_best_only=True, mode='max',save_weights_only=True)
train_D = data_generator(X_train, X_train_fea, shuffle=True)
valid_D = data_generator(X_valid, X_valid_fea, shuffle=False)
test_D = data_generator(data_test, feature_test, shuffle=False)
model.fit_generator(
train_D.__iter__(),
            steps_per_epoch=len(train_D),  # one pass over all training batches per epoch
epochs=10,
validation_data=valid_D.__iter__(),
validation_steps=len(valid_D),
callbacks=[early_stopping, plateau, checkpoint],
verbose=2
)
model.load_weights('./model/' + str(i) + '.hdf5')
# return model
val = model.predict_generator(valid_D.__iter__(), steps=len(valid_D),verbose=0)
print(val)
score = f1_score(train['flag'].values[test_fold], np.argmax(val, axis=1))
acc_score = accuracy_score(train['flag'].values[test_fold], np.argmax(val, axis=1))
global f1, acc
f1.append(score)
acc.append(acc_score)
print('validate f1 score:', score)
print('validate accuracy score:', acc_score)
train_model_pred[test_fold, :] = val
test_model_pred += model.predict_generator(test_D.__iter__(), steps=len(test_D),verbose=0)
del model; gc.collect()
K.clear_session()
return train_model_pred, test_model_pred
################################################################
train_model_pred, test_model_pred = run_cv(10, DATA_LIST, train_feature, None, DATA_LIST_TEST, test_feature)
print('validate aver f1 score:', np.average(f1))
print('validate aver accuracy score:', np.average(acc))
np.save('weights/bert_prob_train_binary_label_add_feature_extend_trainSet-PreProcess-roberta-large-wwm-ext.npy', train_model_pred)
np.save('weights/bert_prob_test_binary_label_add_feature_extend_trainSet-PreProcess-roberta-large-wwm-ext.npy', test_model_pred)
################################################################
################################################################
def return_list(group):
return ';'.join(list(group))
sub = test.copy()
sub['label'] = [np.argmax(index) for index in test_model_pred]
test['prob'] = [index[1] for index in test_model_pred]
sub = sub[sub['label'] == 1]
key_entity = sub.groupby(['id'], as_index=False)['entity_label'].agg({'key_entity': return_list})
sub_id = set(test['id']) - set(key_entity['id'])
sub_test = test[test['id'].isin(sub_id)]
sub_test = sub_test.sort_values(by=['id', 'prob'], ascending=False).drop_duplicates(['id'], keep='first')
sub_test['key_entity'] = sub_test['entity_label']
key_entity = pd.concat([key_entity, sub_test[['id', 'key_entity']]], axis=0, ignore_index=True)
test_2 = | pd.read_csv(data_path + 'round2_test.csv', encoding='utf-8') | pandas.read_csv |
import pandas as pd
from django.shortcuts import render, redirect
import os
from .associations import (
generate_associated_dt_annotation,
generate_associated_coord_annotation,
)
import json
import logging
from .form_population import Form
from utils.cache_helper import cache_get, cache_set
from .helper_functions import gen_samples
from .one_primary_set import one_primary_set
from django.views import View
class Annotation(View):
"""View responsible for handling the annotations post and serving the form [get and post]"""
def set_vars(self, uuid, col):
self.uuid = uuid
self.strftime_conversion = {
"Y": "Year",
"y": "Year",
"m": "Month",
"d": "Day",
"j": "Day",
"H": "Hour",
"I": "Hour",
"M": "Minute",
"S": "Second",
}
self.col = col
self.gc = self.get_gc()
self.annotations = self.get_annotations()
self.samples = self.get_samples()
self.fp = f"data/{self.uuid}/annotations.json"
self.context = {"uuid": self.uuid, "col": self.col}
def date_association_post(self, post):
"""post handling if post has a dateAssociation"""
col = self.col
associations = {}
if post.get("dateAssociation", False):
for x in post:
if x.find("associated_") != -1:
associations[x] = post[x]
if "primary_time" in post:
pt = True
else:
pt = False
associations[post.get("Time")] = col
to_del = []
for x in associations:
if associations[x] == "No Associable Column":
to_del.append(x)
for x in to_del:
del associations[x]
qualifies = None
if "qualifyColumn" in post:
qualifies = post.getlist("qualifyColumn")
generated_annotations = generate_associated_dt_annotation(
associations,
col,
pt,
description=post["Description"],
qualifies=qualifies,
)
for x in generated_annotations.keys():
self.annotations[x] = generated_annotations[x]
for y in self.annotations[x]["associated_columns"]:
if x == self.annotations[x]["associated_columns"][y]:
del self.annotations[x]["associated_columns"][y]
self.associations = associations
return True
def clear_annotations(self):
annotations, col, uuid = self.annotations, self.col, self.uuid
if annotations.get(col, {}).get("dateAssociation", False):
# Clear associated column
for x in annotations.get(col, {}).get("associated_columns", []):
if x != col:
del annotations[x]
if annotations.get(col, {}).get("Coord_Pair_Form", False):
if annotations.get(annotations[col]["Coord_Pair_Form"], False):
del annotations[annotations[col]["Coord_Pair_Form"]]
logging.info(annotations)
logging.info(col)
if annotations.get(col):
logging.info(f"deleteting {col}")
del annotations[col]
self.annotations = annotations
self.save_annotations()
return True
def geo_pair_post(self, post):
col = self.col
if "isGeoPair" in post:
other_col = post["Coord_Pair_Form"]
if post["Geo"] == "Latitude":
geo_type = "Longitude"
elif post["Geo"] == "Longitude":
geo_type = "Latitude"
if "primary_geo" in post:
pg = True
else:
pg = False
other_anno = generate_associated_coord_annotation(
other_col, geo_type, col, pg, description=post["Description"]
)
for x in other_anno.keys():
self.annotations[x] = other_anno[x]
def post(self, request, *args, **kwargs):
"""post handling for annotation"""
self.set_vars(kwargs.get("uuid"), request.GET.get("col"))
post = request.POST
annotations, uuid, col = self.annotations, self.uuid, self.col
if post.get("Clear", False):
# handling for clear button
self.clear_annotations()
logging.info(
f"{cache_get(uuid, 'logging_preface', None)} - Cleared annotations for col: {col}"
)
return redirect(f"../{uuid}", request)
# TODO annotations var replace
logging_preface = cache_get(uuid, "logging_preface", None)
logging.info(f"{logging_preface} - posted column: {col}")
if post.get("primary_geo", False) and post.get("Geo", False):
# write over existing primary_geo if new one is posted
geo_type = post.get("Geo")
if geo_type in ["Latitude", "Longitude", "Coordinates"]:
to_del = []
for x in annotations:
if annotations.get(x, {}).get("primary_geo", False):
to_del.append(x)
for x in to_del:
del annotations[x]["primary_geo"]
to_del = []
if annotations.get(col):
# delete existing annotations for this col and associated ones if it is reposted
to_del = [col]
for x in annotations.get(col, {}).get("associated_columns", []):
if annotations.get(x, False):
to_del.append(x)
for x in to_del:
del annotations[x]
to_write = {}
self.annotations = annotations
# Delete any other primary_time annotations; this happens when the user
# ignores the warning.
if post.get("primary_time", False):
if cache_get(uuid, "primary_time", False):
for x in annotations:
if "primary_time" in annotations[x].keys():
del annotations[x]["primary_time"]
cache_set(uuid, "primary_time_set", True)
# Delete any other primary_geo - Geo combinations; this happens when the user
# ignores the warning.
if post.get("primary_geo", False) and post.get("Geo", False):
session_key = None
geo_type = post.get("Geo")
if geo_type in ["Country", "ISO2", "ISO3"]:
session_key = "primary_country_set"
elif geo_type == "State/Territory":
session_key = "primary_admin1_set"
elif geo_type == "County/District":
session_key = "primary_admin2_set"
elif geo_type == "Municipality/Town":
session_key = "primary_admin3_set"
elif geo_type in ["Latitude", "Longitude", "Coordinates"]:
session_key = "primary_coord_set"
if session_key:
# if request.session[session_key]:
if cache_get(uuid, session_key, False):
for x in annotations:
if annotations.get(x).get("primary_geo", False):
del annotations[x]["primary_geo"]
self.annotations = annotations
self.date_association_post(post)
self.geo_pair_post(post)
annotations = self.annotations
for x in post:
if x.find("csrfmiddle") == -1:
if post[x] == "true":
to_write[x] = True
else:
if x == "qualifyColumn":
to_write[x] = post.getlist(x)
else:
to_write[x] = post[x]
for x in post:
if x.find("associated_") != -1:
# don't write associated columns, that is handled in generate_associated_xyz
del to_write[x]
pruned_associations = {
x.replace("associated_", ""): self.associations[x]
for x in self.associations
if x.find("_format") == -1
}
if pruned_associations != {}:
to_write["associated_columns"] = pruned_associations
t_type = post["Time"]
to_write["format"] = self.associations[f"associated_{t_type}_format"]
to_del = []
if "associated_columns" in to_write:
for x in to_write["associated_columns"]:
if to_write["associated_columns"][x] == col:
to_del.append(x)
for x in to_del:
del to_write["associated_columns"][x]
# Handle aliases by generating a dictionary of key/values for each
# current and new alias pairing. Then remove those keys from the annotations object
# and add in an `aliases` key for the built dictionary.
alias_indices = [int(x.split("-")[-1]) for x in post if "alias-current" in x]
aliases = {}
for i in alias_indices:
aliases[post[f"alias-current-{i}"]] = post[f"alias-new-{i}"]
del to_write[f"alias-current-{i}"]
del to_write[f"alias-new-{i}"]
to_write["aliases"] = aliases
annotations[col] = to_write
self.annotations = annotations
self.save_annotations()
one_primary_set(annotations, {"uuid": self.uuid})
return redirect(f"../{uuid}", request)
def get_samples(self):
"""generate the samples for the ui if they are not present in the cache"""
uuid = self.uuid
if not cache_get(uuid, "new_samples", False):
df = pd.read_csv(f"data/{uuid}/raw_data.csv", nrows=10000)
gen_samples(uuid, df)
cache_set(uuid, "new_samples", True)
return cache_get(uuid, "annotation_samples")
def get_gc(self):
"""get geotime classifications for this column"""
with open(f"data/{self.uuid}/geotime_classification.json", "r") as infile:
return json.load(infile)
def save_annotations(self):
"""save annotations to file"""
with open(self.fp, "w") as outfile:
json.dump(self.annotations, outfile)
return True
def get_annotations(self):
"""get annotations from file"""
if "annotations.json" in os.listdir(f"data/{self.uuid}"):
with open(f"data/{self.uuid}/annotations.json", "r") as infile:
annotations = json.load(infile)
else:
annotations = {}
return annotations
def get(self, request, *args, **kwargs):
"""logic responsible for building the necessary context and serving the form"""
self.set_vars(kwargs.get("uuid"), request.GET.get("col"))
context, annotations, uuid, col = (
self.context,
self.annotations,
self.uuid,
self.col,
)
email = cache_get(uuid, "email", None)
logging_preface = cache_get(uuid, "logging_preface", None)
logging.info(f"{logging_preface} - Viewing column: {col}")
gc = self.gc
redir_col = annotations.get(col, {}).get("redir_col", False)
if redir_col:
if annotations.get(redir_col, False):
return redirect(
f'../annotate/{uuid}?col={annotations[col]["redir_col"]}',
request,
)
# if not request.session.get("new_samples", False):
samples = self.samples
df = | pd.DataFrame(samples) | pandas.DataFrame |
# Licensed to Modin Development Team under one or more contributor license
# agreements. See the NOTICE file distributed with this work for additional
# information regarding copyright ownership. The Modin Development Team
# licenses this file to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
#
# This file is copied and adapted from:
# http://github.com/modin-project/modin/master/modin/pandas/test/test_general.py
import sys
import pytest
import pandas
import numpy as np
from numpy.testing import assert_array_equal
import ray
from ray.util.client.ray_client_helpers import ray_start_client_server
modin_compatible_version = sys.version_info >= (3, 7, 0)
modin_installed = True
if modin_compatible_version:
try:
import modin # noqa: F401
except ModuleNotFoundError:
modin_installed = False
skip = not modin_compatible_version or not modin_installed
# These tests are written for versions of Modin that require python 3.7+
pytestmark = pytest.mark.skipif(skip, reason="Outdated or missing Modin dependency")
if not skip:
from ray.tests.modin.modin_test_utils import df_equals
import modin.pandas as pd
# Module scoped fixture. Will first run all tests without ray
# client, then rerun all tests with a single ray client session.
@pytest.fixture(params=[False, True], autouse=True, scope="module")
def run_ray_client(request):
if request.param:
with ray_start_client_server() as client:
yield client
else:
# Run without ray client (do nothing)
yield
# Cleanup state before rerunning tests with client
ray.shutdown()
random_state = np.random.RandomState(seed=42)
# Size of test dataframes
NCOLS, NROWS = (2 ** 6, 2 ** 8)
# Range for values for test data
RAND_LOW = 0
RAND_HIGH = 100
# Input data and functions for the tests
# The test data that we will test our code against
test_data = {
"int_data": {
"col{}".format(int((i - NCOLS / 2) % NCOLS + 1)): random_state.randint(
RAND_LOW, RAND_HIGH, size=(NROWS)
)
for i in range(NCOLS)
},
"float_nan_data": {
"col{}".format(int((i - NCOLS / 2) % NCOLS + 1)): [
x
if (j % 4 == 0 and i > NCOLS // 2) or (j != i and i <= NCOLS // 2)
else np.NaN
for j, x in enumerate(
random_state.uniform(RAND_LOW, RAND_HIGH, size=(NROWS))
)
]
for i in range(NCOLS)
},
}
test_data["int_data"]["index"] = test_data["int_data"].pop(
"col{}".format(int(NCOLS / 2))
)
for col in test_data["float_nan_data"]:
for row in range(NROWS // 2):
if row % 16 == 0:
test_data["float_nan_data"][col][row] = np.NaN
test_data_values = list(test_data.values())
test_data_keys = list(test_data.keys())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_isna(data):
pandas_df = pandas.DataFrame(data)
modin_df = pd.DataFrame(data)
pandas_result = pandas.isna(pandas_df)
modin_result = pd.isna(modin_df)
df_equals(modin_result, pandas_result)
modin_result = pd.isna(pd.Series([1, np.nan, 2]))
pandas_result = pandas.isna(pandas.Series([1, np.nan, 2]))
df_equals(modin_result, pandas_result)
assert pd.isna(np.nan) == pandas.isna(np.nan)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_isnull(data):
pandas_df = pandas.DataFrame(data)
modin_df = pd.DataFrame(data)
pandas_result = pandas.isnull(pandas_df)
modin_result = pd.isnull(modin_df)
df_equals(modin_result, pandas_result)
modin_result = pd.isnull(pd.Series([1, np.nan, 2]))
pandas_result = pandas.isnull(pandas.Series([1, np.nan, 2]))
df_equals(modin_result, pandas_result)
assert pd.isna(np.nan) == pandas.isna(np.nan)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_notna(data):
pandas_df = pandas.DataFrame(data)
modin_df = pd.DataFrame(data)
pandas_result = pandas.notna(pandas_df)
modin_result = pd.notna(modin_df)
df_equals(modin_result, pandas_result)
modin_result = pd.notna(pd.Series([1, np.nan, 2]))
pandas_result = pandas.notna(pandas.Series([1, np.nan, 2]))
df_equals(modin_result, pandas_result)
assert pd.isna(np.nan) == pandas.isna(np.nan)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_notnull(data):
pandas_df = pandas.DataFrame(data)
modin_df = pd.DataFrame(data)
pandas_result = pandas.notnull(pandas_df)
modin_result = pd.notnull(modin_df)
df_equals(modin_result, pandas_result)
modin_result = pd.notnull(pd.Series([1, np.nan, 2]))
pandas_result = pandas.notnull(pandas.Series([1, np.nan, 2]))
df_equals(modin_result, pandas_result)
assert pd.isna(np.nan) == pandas.isna(np.nan)
def test_merge():
frame_data = {
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 0, 1],
"col4": [2, 4, 5, 6],
}
modin_df = pd.DataFrame(frame_data)
pandas_df = pandas.DataFrame(frame_data)
frame_data2 = {"col1": [0, 1, 2], "col2": [1, 5, 6]}
modin_df2 = pd.DataFrame(frame_data2)
pandas_df2 = pandas.DataFrame(frame_data2)
join_types = ["outer", "inner"]
for how in join_types:
# Defaults
modin_result = pd.merge(modin_df, modin_df2, how=how)
pandas_result = pandas.merge(pandas_df, pandas_df2, how=how)
df_equals(modin_result, pandas_result)
# left_on and right_index
modin_result = pd.merge(
modin_df, modin_df2, how=how, left_on="col1", right_index=True
)
pandas_result = pandas.merge(
pandas_df, pandas_df2, how=how, left_on="col1", right_index=True
)
df_equals(modin_result, pandas_result)
# left_index and right_on
modin_result = pd.merge(
modin_df, modin_df2, how=how, left_index=True, right_on="col1"
)
pandas_result = pandas.merge(
pandas_df, pandas_df2, how=how, left_index=True, right_on="col1"
)
df_equals(modin_result, pandas_result)
# left_on and right_on col1
modin_result = pd.merge(
modin_df, modin_df2, how=how, left_on="col1", right_on="col1"
)
pandas_result = pandas.merge(
pandas_df, pandas_df2, how=how, left_on="col1", right_on="col1"
)
df_equals(modin_result, pandas_result)
# left_on and right_on col2
modin_result = pd.merge(
modin_df, modin_df2, how=how, left_on="col2", right_on="col2"
)
pandas_result = pandas.merge(
pandas_df, pandas_df2, how=how, left_on="col2", right_on="col2"
)
df_equals(modin_result, pandas_result)
# left_index and right_index
modin_result = pd.merge(
modin_df, modin_df2, how=how, left_index=True, right_index=True
)
pandas_result = pandas.merge(
pandas_df, pandas_df2, how=how, left_index=True, right_index=True
)
df_equals(modin_result, pandas_result)
s = pd.Series(frame_data.get("col1"))
with pytest.raises(ValueError):
pd.merge(s, modin_df2)
with pytest.raises(TypeError):
pd.merge("Non-valid type", modin_df2)
def test_pivot():
test_df = pd.DataFrame(
{
"foo": ["one", "one", "one", "two", "two", "two"],
"bar": ["A", "B", "C", "A", "B", "C"],
"baz": [1, 2, 3, 4, 5, 6],
"zoo": ["x", "y", "z", "q", "w", "t"],
}
)
df = pd.pivot(test_df, index="foo", columns="bar", values="baz")
assert isinstance(df, pd.DataFrame)
with pytest.raises(ValueError):
pd.pivot(test_df["bar"], index="foo", columns="bar", values="baz")
def test_pivot_table():
test_df = pd.DataFrame(
{
"A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"],
"B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"],
"C": [
"small",
"large",
"large",
"small",
"small",
"large",
"small",
"small",
"large",
],
"D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
"E": [2, 4, 5, 5, 6, 6, 8, 9, 9],
}
)
df = pd.pivot_table(
test_df, values="D", index=["A", "B"], columns=["C"], aggfunc=np.sum
)
assert isinstance(df, pd.DataFrame)
with pytest.raises(ValueError):
pd.pivot_table(
test_df["C"], values="D", index=["A", "B"], columns=["C"], aggfunc=np.sum
)
def test_unique():
modin_result = pd.unique([2, 1, 3, 3])
pandas_result = pandas.unique([2, 1, 3, 3])
assert_array_equal(modin_result, pandas_result)
assert modin_result.shape == pandas_result.shape
modin_result = pd.unique(pd.Series([2] + [1] * 5))
pandas_result = pandas.unique(pandas.Series([2] + [1] * 5))
assert_array_equal(modin_result, pandas_result)
assert modin_result.shape == pandas_result.shape
modin_result = pd.unique(
pd.Series([pd.Timestamp("20160101"), pd.Timestamp("20160101")])
)
pandas_result = pandas.unique(
pandas.Series([pandas.Timestamp("20160101"), pandas.Timestamp("20160101")])
)
assert_array_equal(modin_result, pandas_result)
assert modin_result.shape == pandas_result.shape
modin_result = pd.unique(
pd.Series(
[
pd.Timestamp("20160101", tz="US/Eastern"),
pd.Timestamp("20160101", tz="US/Eastern"),
]
)
)
pandas_result = pandas.unique(
pandas.Series(
[
pandas.Timestamp("20160101", tz="US/Eastern"),
pandas.Timestamp("20160101", tz="US/Eastern"),
]
)
)
assert_array_equal(modin_result, pandas_result)
assert modin_result.shape == pandas_result.shape
modin_result = pd.unique(
pd.Index(
[
pd.Timestamp("20160101", tz="US/Eastern"),
pd.Timestamp("20160101", tz="US/Eastern"),
]
)
)
pandas_result = pandas.unique(
pandas.Index(
[
pandas.Timestamp("20160101", tz="US/Eastern"),
pandas.Timestamp("20160101", tz="US/Eastern"),
]
)
)
assert_array_equal(modin_result, pandas_result)
assert modin_result.shape == pandas_result.shape
modin_result = pd.unique(pd.Series(pd.Categorical(list("baabc"))))
pandas_result = pandas.unique(pandas.Series(pandas.Categorical(list("baabc"))))
assert_array_equal(modin_result, pandas_result)
assert modin_result.shape == pandas_result.shape
def test_to_datetime():
# DataFrame input for to_datetime
modin_df = pd.DataFrame({"year": [2015, 2016], "month": [2, 3], "day": [4, 5]})
pandas_df = pandas.DataFrame({"year": [2015, 2016], "month": [2, 3], "day": [4, 5]})
df_equals(pd.to_datetime(modin_df), pandas.to_datetime(pandas_df))
# Series input for to_datetime
modin_s = pd.Series(["3/11/2000", "3/12/2000", "3/13/2000"] * 1000)
pandas_s = pandas.Series(["3/11/2000", "3/12/2000", "3/13/2000"] * 1000)
df_equals(pd.to_datetime(modin_s), pandas.to_datetime(pandas_s))
# Other inputs for to_datetime
value = 1490195805
assert pd.to_datetime(value, unit="s") == pandas.to_datetime(value, unit="s")
value = 1490195805433502912
assert pd.to_datetime(value, unit="ns") == pandas.to_datetime(value, unit="ns")
value = [1, 2, 3]
assert pd.to_datetime(value, unit="D", origin=pd.Timestamp("2000-01-01")).equals(
pandas.to_datetime(value, unit="D", origin=pandas.Timestamp("2000-01-01"))
)
@pytest.mark.parametrize(
"data, errors, downcast",
[
(["1.0", "2", -3], "raise", None),
(["1.0", "2", -3], "raise", "float"),
(["1.0", "2", -3], "raise", "signed"),
(["apple", "1.0", "2", -3], "ignore", None),
(["apple", "1.0", "2", -3], "coerce", None),
],
)
def test_to_numeric(data, errors, downcast):
modin_series = pd.Series(data)
pandas_series = | pandas.Series(data) | pandas.Series |
from itertools import product
import numpy as np
import pytest
from pandas.core.dtypes.common import is_interval_dtype
import pandas as pd
import pandas._testing as tm
# Each test case consists of a tuple with the data and dtype to create the
# test Series, the default dtype for the expected result (which is valid
# for most cases), and the specific cases where the result deviates from
# this default. Those overrides are defined as a dict with (keyword, val) as
# dictionary key. In case of multiple items, the last override takes precedence.
test_cases = [
(
# data
[1, 2, 3],
# original dtype
np.dtype("int32"),
# default expected dtype
"Int32",
# exceptions on expected dtype
{("convert_integer", False): np.dtype("int32")},
),
(
[1, 2, 3],
np.dtype("int64"),
"Int64",
{("convert_integer", False): np.dtype("int64")},
),
(
["x", "y", "z"],
np.dtype("O"),
pd.StringDtype(),
{("convert_string", False): np.dtype("O")},
),
(
[True, False, np.nan],
np.dtype("O"),
pd.BooleanDtype(),
{("convert_boolean", False): np.dtype("O")},
),
(
["h", "i", np.nan],
np.dtype("O"),
pd.StringDtype(),
{("convert_string", False): np.dtype("O")},
),
( # GH32117
["h", "i", 1],
np.dtype("O"),
np.dtype("O"),
{},
),
(
[10, np.nan, 20],
np.dtype("float"),
"Int64",
{
("convert_integer", False, "convert_floating", True): "Float64",
("convert_integer", False, "convert_floating", False): np.dtype("float"),
},
),
(
[np.nan, 100.5, 200],
np.dtype("float"),
"Float64",
{("convert_floating", False): np.dtype("float")},
),
(
[3, 4, 5],
"Int8",
"Int8",
{},
),
(
[[1, 2], [3, 4], [5]],
None,
np.dtype("O"),
{},
),
(
[4, 5, 6],
np.dtype("uint32"),
"UInt32",
{("convert_integer", False): np.dtype("uint32")},
),
(
[-10, 12, 13],
np.dtype("i1"),
"Int8",
{("convert_integer", False): np.dtype("i1")},
),
(
[1.2, 1.3],
np.dtype("float32"),
"Float32",
{("convert_floating", False): np.dtype("float32")},
),
(
[1, 2.0],
object,
"Int64",
{
("convert_integer", False): "Float64",
("convert_integer", False, "convert_floating", False): np.dtype("float"),
("infer_objects", False): np.dtype("object"),
},
),
(
[1, 2.5],
object,
"Float64",
{
("convert_floating", False): np.dtype("float"),
("infer_objects", False): np.dtype("object"),
},
),
(["a", "b"], pd.CategoricalDtype(), pd.CategoricalDtype(), {}),
(
pd.to_datetime(["2020-01-14 10:00", "2020-01-15 11:11"]),
pd.DatetimeTZDtype(tz="UTC"),
pd.DatetimeTZDtype(tz="UTC"),
{},
),
(
pd.to_datetime(["2020-01-14 10:00", "2020-01-15 11:11"]),
"datetime64[ns]",
np.dtype("datetime64[ns]"),
{},
),
(
pd.to_datetime(["2020-01-14 10:00", "2020-01-15 11:11"]),
object,
np.dtype("datetime64[ns]"),
{("infer_objects", False): np.dtype("object")},
),
(pd.period_range("1/1/2011", freq="M", periods=3), None, pd.PeriodDtype("M"), {}),
(
pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)]),
None,
pd.IntervalDtype("int64", "right"),
{},
),
]
class TestSeriesConvertDtypes:
@pytest.mark.parametrize(
"data, maindtype, expected_default, expected_other",
test_cases,
)
@pytest.mark.parametrize("params", product(*[(True, False)] * 5))
def test_convert_dtypes(
self, data, maindtype, params, expected_default, expected_other
):
warn = None
if (
hasattr(data, "dtype")
and data.dtype == "M8[ns]"
and isinstance(maindtype, pd.DatetimeTZDtype)
):
# this astype is deprecated in favor of tz_localize
warn = FutureWarning
if maindtype is not None:
with tm.assert_produces_warning(warn):
series = pd.Series(data, dtype=maindtype)
else:
series = | pd.Series(data) | pandas.Series |
from __future__ import print_function, division
import itertools
from copy import deepcopy
from collections import OrderedDict
from warnings import warn
import nilmtk
import pandas as pd
import numpy as np
import metrics
import matplotlib.pyplot as plt
from hmmlearn import hmm
from nilmtk.feature_detectors import cluster
from nilmtk.disaggregate import Disaggregator
# Python 2/3 compatibility
from six import iteritems
from builtins import range
SEED = 42
# Fix the seed for repeatibility of experiments
np.random.seed(SEED)
def sort_startprob(mapping, startprob):
""" Sort the startprob according to power means; as returned by mapping
"""
num_elements = len(startprob)
new_startprob = np.zeros(num_elements)
for i in range(len(startprob)):
new_startprob[i] = startprob[mapping[i]]
return new_startprob
def sort_covars(mapping, covars):
new_covars = np.zeros_like(covars)
for i in range(len(covars)):
new_covars[i] = covars[mapping[i]]
return new_covars
def sort_transition_matrix(mapping, A):
"""Sorts the transition matrix according to increasing order of
power means; as returned by mapping
Parameters
----------
mapping :
A : numpy.array of shape (k, k)
transition matrix
"""
num_elements = len(A)
A_new = np.zeros((num_elements, num_elements))
for i in range(num_elements):
for j in range(num_elements):
A_new[i, j] = A[mapping[i], mapping[j]]
return A_new
def sort_learnt_parameters(startprob, means, covars, transmat):
mapping = return_sorting_mapping(means)
means_new = np.sort(means, axis=0)
startprob_new = sort_startprob(mapping, startprob)
covars_new = sort_covars(mapping, covars)
transmat_new = sort_transition_matrix(mapping, transmat)
assert np.shape(means_new) == np.shape(means)
assert np.shape(startprob_new) == np.shape(startprob)
assert np.shape(transmat_new) == np.shape(transmat)
return [startprob_new, means_new, covars_new, transmat_new]
def compute_A_fhmm(list_A):
"""
Parameters
-----------
    list_A : List of transition matrices of the individual appliance HMMs
Returns
--------
    result : Combined transition matrix for the FHMM
"""
result = list_A[0]
for i in range(len(list_A) - 1):
result = np.kron(result, list_A[i + 1])
return result
def compute_means_fhmm(list_means):
"""
Returns
-------
[mu, cov]
"""
states_combination = list(itertools.product(*list_means))
num_combinations = len(states_combination)
means_stacked = np.array([sum(x) for x in states_combination])
means = np.reshape(means_stacked, (num_combinations, 1))
cov = np.tile(5 * np.identity(1), (num_combinations, 1, 1))
return [means, cov]
def compute_pi_fhmm(list_pi):
"""
Parameters
-----------
list_pi : List of PI's of individual learnt HMMs
Returns
-------
result : Combined Pi for the FHMM
"""
result = list_pi[0]
for i in range(len(list_pi) - 1):
result = np.kron(result, list_pi[i + 1])
return result
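# Hedged sketch (toy parameters, not learnt from data): np.kron combines the
# per-appliance HMM parameters into the factorial model, so two 2-state appliances
# yield a joint chain with 2 * 2 = 4 states.
def _demo_combine_two_appliances():
    pi_fridge = np.array([0.9, 0.1])
    pi_kettle = np.array([0.8, 0.2])
    pi_joint = compute_pi_fhmm([pi_fridge, pi_kettle])  # shape (4,)
    A_fridge = np.array([[0.95, 0.05], [0.10, 0.90]])
    A_kettle = np.array([[0.90, 0.10], [0.30, 0.70]])
    A_joint = compute_A_fhmm([A_fridge, A_kettle])  # shape (4, 4)
    return pi_joint, A_joint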
def create_combined_hmm(model):
list_pi = [model[appliance].startprob_ for appliance in model]
list_A = [model[appliance].transmat_ for appliance in model]
list_means = [model[appliance].means_.flatten().tolist()
for appliance in model]
pi_combined = compute_pi_fhmm(list_pi)
A_combined = compute_A_fhmm(list_A)
[mean_combined, cov_combined] = compute_means_fhmm(list_means)
# combined_model = hmm.GaussianHMM(
# n_components=len(pi_combined), covariance_type='full',
# startprob=pi_combined, transmat=A_combined)
combined_model = hmm.GaussianHMM(n_components=len(pi_combined), covariance_type='full')
combined_model.startprob_ = pi_combined
combined_model.transmat_ = A_combined
combined_model.covars_ = cov_combined
combined_model.means_ = mean_combined
return combined_model
def return_sorting_mapping(means):
means_copy = deepcopy(means)
means_copy = np.sort(means_copy, axis=0)
# Finding mapping
mapping = {}
for i, val in enumerate(means_copy):
mapping[i] = np.where(val == means)[0][0]
return mapping
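# Hedged worked example (toy means): position i in the sorted order maps to the row of
# the original `means` array holding that value.
def _demo_sorting_mapping():
    means = np.array([[150.0], [5.0], [60.0]])
    return return_sorting_mapping(means)  # expected: {0: 1, 1: 2, 2: 0}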
def decode_hmm(length_sequence, centroids, appliance_list, states):
"""
Decodes the HMM state sequence
"""
hmm_states = {}
hmm_power = {}
total_num_combinations = 1
for appliance in appliance_list:
total_num_combinations *= len(centroids[appliance])
for appliance in appliance_list:
hmm_states[appliance] = np.zeros(length_sequence, dtype=np.int)
hmm_power[appliance] = np.zeros(length_sequence)
for i in range(length_sequence):
factor = total_num_combinations
for appliance in appliance_list:
            # use floor division so the decoded state index stays an integer in Python 3
            factor = factor // len(centroids[appliance])
            temp = int(states[i]) // factor
hmm_states[appliance][i] = temp % len(centroids[appliance])
hmm_power[appliance][i] = centroids[
appliance][hmm_states[appliance][i]]
return [hmm_states, hmm_power]
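# Hedged worked example (made-up centroids): the joint state index is decoded in mixed
# radix. With appliance 'A' having 3 states and 'B' having 2, joint state 5 decomposes
# as state_A = 5 // 2 = 2 and state_B = 5 % 2 = 1.
def _demo_decode_single_step():
    centroids = {'A': [0.0, 100.0, 200.0], 'B': [0.0, 50.0]}
    states, power = decode_hmm(1, centroids, ['A', 'B'], [5])
    return states, power  # expected: state 2 (200.0) for 'A', state 1 (50.0) for 'B'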
class FHMM(Disaggregator):
"""
Attributes
----------
model : dict
predictions : pd.DataFrame()
meters : list
MIN_CHUNK_LENGTH : int
"""
def __init__(self):
self.model = {}
self.predictions = | pd.DataFrame() | pandas.DataFrame |
""" test the scalar Timedelta """
from datetime import timedelta
import numpy as np
import pytest
from pandas._libs import lib
from pandas._libs.tslibs import (
NaT,
iNaT,
)
import pandas as pd
from pandas import (
Timedelta,
TimedeltaIndex,
offsets,
to_timedelta,
)
import pandas._testing as tm
class TestTimedeltaUnaryOps:
def test_unary_ops(self):
td = Timedelta(10, unit="d")
# __neg__, __pos__
assert -td == Timedelta(-10, unit="d")
assert -td == Timedelta("-10d")
assert +td == Timedelta(10, unit="d")
# __abs__, __abs__(__neg__)
assert abs(td) == td
assert abs(-td) == td
assert abs(-td) == Timedelta("10d")
class TestTimedeltas:
@pytest.mark.parametrize(
"unit, value, expected",
[
("us", 9.999, 9999),
("ms", 9.999999, 9999999),
("s", 9.999999999, 9999999999),
],
)
def test_rounding_on_int_unit_construction(self, unit, value, expected):
# GH 12690
result = Timedelta(value, unit=unit)
assert result.value == expected
result = Timedelta(str(value) + unit)
assert result.value == expected
def test_total_seconds_scalar(self):
# see gh-10939
rng = Timedelta("1 days, 10:11:12.100123456")
expt = 1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456.0 / 1e9
tm.assert_almost_equal(rng.total_seconds(), expt)
rng = Timedelta(np.nan)
assert np.isnan(rng.total_seconds())
def test_conversion(self):
for td in [Timedelta(10, unit="d"), Timedelta("1 days, 10:11:12.012345")]:
pydt = td.to_pytimedelta()
assert td == Timedelta(pydt)
assert td == pydt
assert isinstance(pydt, timedelta) and not isinstance(pydt, Timedelta)
assert td == np.timedelta64(td.value, "ns")
td64 = td.to_timedelta64()
assert td64 == np.timedelta64(td.value, "ns")
assert td == td64
assert isinstance(td64, np.timedelta64)
# this is NOT equal and cannot be roundtripped (because of the nanos)
td = Timedelta("1 days, 10:11:12.012345678")
assert td != td.to_pytimedelta()
def test_fields(self):
def check(value):
# that we are int
assert isinstance(value, int)
# compat to datetime.timedelta
rng = to_timedelta("1 days, 10:11:12")
assert rng.days == 1
assert rng.seconds == 10 * 3600 + 11 * 60 + 12
assert rng.microseconds == 0
assert rng.nanoseconds == 0
msg = "'Timedelta' object has no attribute '{}'"
with pytest.raises(AttributeError, match=msg.format("hours")):
rng.hours
with pytest.raises(AttributeError, match=msg.format("minutes")):
rng.minutes
with pytest.raises(AttributeError, match=msg.format("milliseconds")):
rng.milliseconds
# GH 10050
check(rng.days)
check(rng.seconds)
check(rng.microseconds)
check(rng.nanoseconds)
td = Timedelta("-1 days, 10:11:12")
assert abs(td) == Timedelta("13:48:48")
assert str(td) == "-1 days +10:11:12"
assert -td == Timedelta("0 days 13:48:48")
assert -Timedelta("-1 days, 10:11:12").value == 49728000000000
assert Timedelta("-1 days, 10:11:12").value == -49728000000000
rng = to_timedelta("-1 days, 10:11:12.100123456")
assert rng.days == -1
assert rng.seconds == 10 * 3600 + 11 * 60 + 12
assert rng.microseconds == 100 * 1000 + 123
assert rng.nanoseconds == 456
msg = "'Timedelta' object has no attribute '{}'"
with pytest.raises(AttributeError, match=msg.format("hours")):
rng.hours
with pytest.raises(AttributeError, match=msg.format("minutes")):
rng.minutes
with pytest.raises(AttributeError, match=msg.format("milliseconds")):
rng.milliseconds
# components
tup = to_timedelta(-1, "us").components
assert tup.days == -1
assert tup.hours == 23
assert tup.minutes == 59
assert tup.seconds == 59
assert tup.milliseconds == 999
assert tup.microseconds == 999
assert tup.nanoseconds == 0
# GH 10050
check(tup.days)
check(tup.hours)
check(tup.minutes)
check(tup.seconds)
check(tup.milliseconds)
check(tup.microseconds)
check(tup.nanoseconds)
tup = Timedelta("-1 days 1 us").components
assert tup.days == -2
assert tup.hours == 23
assert tup.minutes == 59
assert tup.seconds == 59
assert tup.milliseconds == 999
assert tup.microseconds == 999
assert tup.nanoseconds == 0
def test_iso_conversion(self):
# GH #21877
expected = Timedelta(1, unit="s")
assert to_timedelta("P0DT0H0M1S") == expected
def test_nat_converters(self):
result = to_timedelta("nat").to_numpy()
assert result.dtype.kind == "M"
assert result.astype("int64") == iNaT
result = to_timedelta("nan").to_numpy()
assert result.dtype.kind == "M"
assert result.astype("int64") == iNaT
@pytest.mark.parametrize(
"unit, np_unit",
[(value, "W") for value in ["W", "w"]]
+ [(value, "D") for value in ["D", "d", "days", "day", "Days", "Day"]]
+ [
(value, "m")
for value in [
"m",
"minute",
"min",
"minutes",
"t",
"Minute",
"Min",
"Minutes",
"T",
]
]
+ [
(value, "s")
for value in [
"s",
"seconds",
"sec",
"second",
"S",
"Seconds",
"Sec",
"Second",
]
]
+ [
(value, "ms")
for value in [
"ms",
"milliseconds",
"millisecond",
"milli",
"millis",
"l",
"MS",
"Milliseconds",
"Millisecond",
"Milli",
"Millis",
"L",
]
]
+ [
(value, "us")
for value in [
"us",
"microseconds",
"microsecond",
"micro",
"micros",
"u",
"US",
"Microseconds",
"Microsecond",
"Micro",
"Micros",
"U",
]
]
+ [
(value, "ns")
for value in [
"ns",
"nanoseconds",
"nanosecond",
"nano",
"nanos",
"n",
"NS",
"Nanoseconds",
"Nanosecond",
"Nano",
"Nanos",
"N",
]
],
)
@pytest.mark.parametrize("wrapper", [np.array, list, pd.Index])
def test_unit_parser(self, unit, np_unit, wrapper):
# validate all units, GH 6855, GH 21762
# array-likes
expected = TimedeltaIndex(
[np.timedelta64(i, np_unit) for i in np.arange(5).tolist()]
)
result = to_timedelta(wrapper(range(5)), unit=unit)
tm.assert_index_equal(result, expected)
result = TimedeltaIndex(wrapper(range(5)), unit=unit)
tm.assert_index_equal(result, expected)
str_repr = [f"{x}{unit}" for x in np.arange(5)]
result = to_timedelta(wrapper(str_repr))
tm.assert_index_equal(result, expected)
result = to_timedelta(wrapper(str_repr))
tm.assert_index_equal(result, expected)
# scalar
expected = Timedelta(np.timedelta64(2, np_unit).astype("timedelta64[ns]"))
result = to_timedelta(2, unit=unit)
assert result == expected
result = Timedelta(2, unit=unit)
assert result == expected
result = to_timedelta(f"2{unit}")
assert result == expected
result = Timedelta(f"2{unit}")
assert result == expected
@pytest.mark.parametrize("unit", ["Y", "y", "M"])
def test_unit_m_y_raises(self, unit):
msg = "Units 'M', 'Y', and 'y' are no longer supported"
with pytest.raises(ValueError, match=msg):
Timedelta(10, unit)
with pytest.raises(ValueError, match=msg):
to_timedelta(10, unit)
with pytest.raises(ValueError, match=msg):
to_timedelta([1, 2], unit)
def test_numeric_conversions(self):
assert Timedelta(0) == np.timedelta64(0, "ns")
assert Timedelta(10) == np.timedelta64(10, "ns")
assert Timedelta(10, unit="ns") == np.timedelta64(10, "ns")
assert Timedelta(10, unit="us") == np.timedelta64(10, "us")
assert Timedelta(10, unit="ms") == np.timedelta64(10, "ms")
assert Timedelta(10, unit="s") == np.timedelta64(10, "s")
assert Timedelta(10, unit="d") == np.timedelta64(10, "D")
def test_timedelta_conversions(self):
assert Timedelta(timedelta(seconds=1)) == np.timedelta64(1, "s").astype(
"m8[ns]"
)
assert Timedelta(timedelta(microseconds=1)) == np.timedelta64(1, "us").astype(
"m8[ns]"
)
assert Timedelta(timedelta(days=1)) == np.timedelta64(1, "D").astype("m8[ns]")
def test_to_numpy_alias(self):
# GH 24653: alias .to_numpy() for scalars
td = Timedelta("10m7s")
assert td.to_timedelta64() == td.to_numpy()
@pytest.mark.parametrize(
"freq,s1,s2",
[
# This first case has s1, s2 being the same as t1,t2 below
(
"N",
Timedelta("1 days 02:34:56.789123456"),
Timedelta("-1 days 02:34:56.789123456"),
),
(
"U",
Timedelta("1 days 02:34:56.789123000"),
Timedelta("-1 days 02:34:56.789123000"),
),
(
"L",
Timedelta("1 days 02:34:56.789000000"),
Timedelta("-1 days 02:34:56.789000000"),
),
("S", Timedelta("1 days 02:34:57"), Timedelta("-1 days 02:34:57")),
("2S", Timedelta("1 days 02:34:56"), Timedelta("-1 days 02:34:56")),
("5S", Timedelta("1 days 02:34:55"), Timedelta("-1 days 02:34:55")),
("T", Timedelta("1 days 02:35:00"), Timedelta("-1 days 02:35:00")),
("12T", Timedelta("1 days 02:36:00"), Timedelta("-1 days 02:36:00")),
("H", Timedelta("1 days 03:00:00"), Timedelta("-1 days 03:00:00")),
("d", Timedelta("1 days"), Timedelta("-1 days")),
],
)
def test_round(self, freq, s1, s2):
t1 = Timedelta("1 days 02:34:56.789123456")
t2 = Timedelta("-1 days 02:34:56.789123456")
r1 = t1.round(freq)
assert r1 == s1
r2 = t2.round(freq)
assert r2 == s2
def test_round_invalid(self):
t1 = Timedelta("1 days 02:34:56.789123456")
for freq, msg in [
("Y", "<YearEnd: month=12> is a non-fixed frequency"),
("M", "<MonthEnd> is a non-fixed frequency"),
("foobar", "Invalid frequency: foobar"),
]:
with pytest.raises(ValueError, match=msg):
t1.round(freq)
def test_round_implementation_bounds(self):
# See also: analogous test for Timestamp
# GH#38964
result = Timedelta.min.ceil("s")
expected = Timedelta.min + Timedelta(seconds=1) - Timedelta(145224193)
assert result == expected
result = Timedelta.max.floor("s")
expected = Timedelta.max - Timedelta(854775807)
assert result == expected
with pytest.raises(OverflowError, match="value too large"):
Timedelta.min.floor("s")
# the second message here shows up in windows builds
msg = "|".join(
["Python int too large to convert to C long", "int too big to convert"]
)
with pytest.raises(OverflowError, match=msg):
Timedelta.max.ceil("s")
@pytest.mark.parametrize("n", range(100))
@pytest.mark.parametrize(
"method", [Timedelta.round, Timedelta.floor, Timedelta.ceil]
)
def test_round_sanity(self, method, n, request):
val = np.random.randint(iNaT + 1, lib.i8max, dtype=np.int64)
td = Timedelta(val)
assert method(td, "ns") == td
res = method(td, "us")
nanos = 1000
assert np.abs((res - td).value) < nanos
assert res.value % nanos == 0
res = method(td, "ms")
nanos = 1_000_000
assert np.abs((res - td).value) < nanos
assert res.value % nanos == 0
res = method(td, "s")
nanos = 1_000_000_000
assert np.abs((res - td).value) < nanos
assert res.value % nanos == 0
res = method(td, "min")
nanos = 60 * 1_000_000_000
assert np.abs((res - td).value) < nanos
assert res.value % nanos == 0
res = method(td, "h")
nanos = 60 * 60 * 1_000_000_000
assert np.abs((res - td).value) < nanos
assert res.value % nanos == 0
res = method(td, "D")
nanos = 24 * 60 * 60 * 1_000_000_000
assert np.abs((res - td).value) < nanos
assert res.value % nanos == 0
def test_contains(self):
# Checking for any NaT-like objects
# GH 13603
td = to_timedelta(range(5), unit="d") + offsets.Hour(1)
for v in [NaT, None, float("nan"), np.nan]:
assert not (v in td)
td = to_timedelta([NaT])
for v in [NaT, None, float("nan"), np.nan]:
assert v in td
def test_identity(self):
td = | Timedelta(10, unit="d") | pandas.Timedelta |
from numpy.ma import argmax
from pandas import DataFrame
from sklearn.model_selection import train_test_split
from tensorflow.python.keras import Sequential
from tensorflow.python.keras.callbacks import ModelCheckpoint, EarlyStopping
from tensorflow.python.ops.confusion_matrix import confusion_matrix
from audio_utils.utils.file_utils import save_object
import numpy as np
import pandas as pd
from sounds import sounds_config
from sounds.model_plotter import plot_history, plot_confusion_matrix
class AudioFeaturesModel:
def __init__(self, model_name, le, layers):
self.le = le
self.model = Sequential(name=model_name)
# Builds layers based on the structure in model_structures
for layer in layers:
self.model.add(layer)
def compile(self):
"""Compile the model and print the structure"""
self.model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer='adam')
self.model.summary()
def test_model(self, x_data, y_data):
"""Calculate the model's accuracy on the input dataset"""
score = self.model.evaluate(x_data, y_data, verbose=0)
accuracy = 100 * score[1]
return accuracy
def train_model(self, x_train, y_train, x_val, y_val):
"""Train and save the model"""
early_stopping = EarlyStopping(monitor='val_loss', patience=sounds_config.patience, mode='min')
checkpointer = ModelCheckpoint(filepath=f'{sounds_config.sounds_model_dir}/{self.model.name}.hdf5', verbose=1,
save_best_only=True)
history = self.model.fit(x_train, y_train, batch_size=sounds_config.num_batch_size,
epochs=sounds_config.num_epochs, validation_data=(x_val, y_val),
callbacks=[checkpointer, early_stopping], verbose=1)
self.le.save(self.model.name)
return history
def calculate_confusion_matrix(self, x_test, y_test):
"""Calculate the probabilities required for the confusion matrix and create a dataframe"""
y_pred = self.model.predict_classes(x_test)
y_test = argmax(y_test, axis=1)
con_mat = confusion_matrix(labels=y_test, predictions=y_pred).numpy()
con_mat_norm = np.around(con_mat.astype('float') / con_mat.sum(axis=1)[:, np.newaxis], decimals=2)
classes = self.le.inverse_transform(list(range(0, self.le.encoded_labels.shape[1])))
return pd.DataFrame(con_mat_norm, index=classes, columns=classes)
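# Illustrative note (not part of the original module): `layers` is expected to be a list of
# ready-built Keras layers, e.g. something like
#     from tensorflow.keras.layers import Dense, Dropout
#     layers = [Dense(256, activation='relu', input_shape=(n_features,)),
#               Dropout(0.5),
#               Dense(n_classes, activation='softmax')]
# and the label encoder `le` is assumed to expose `encoded_labels`, `save()` and
# `inverse_transform()` as used above. The names here are placeholders, not project APIs.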
def train_and_test_model(features, le, model):
"""Use the AudioFeaturesModel methods to train and test the model"""
# Split the data into training,validation and testing
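# Two-stage split: first carve off the training fraction, then divide the remainder
# between cross-validation (x_cv) and test according to their configured ratios.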
x_train, x_test, y_train, y_test = train_test_split(features, le.encoded_labels,
test_size=1 - sounds_config.train_ratio,
random_state=44)
x_cv, x_test, y_cv, y_test = train_test_split(x_test, y_test,
test_size=sounds_config.test_ratio / (
sounds_config.test_ratio + sounds_config.validation_ratio),
random_state=44)
save_test_data(x_test, y_test, le, model.model.name)
# Calculate pre trained accuracy
pre_acc = model.test_model(x_test, y_test)
print(f'Pre-trained accuracy = {pre_acc:.4f}')
# Train the model and plot the learning curves
plot_history(model.train_model(x_train, y_train, x_cv, y_cv))
# Test trained model on training, validation and test sets
post_acc_train = model.test_model(x_train, y_train)
print(f'Training accuracy = {post_acc_train:.4f}')
post_acc_cv = model.test_model(x_cv, y_cv)
print(f'Cross-validation accuracy = {post_acc_cv:.4f}')
post_acc_test = model.test_model(x_test, y_test)
print(f'Testing accuracy = {post_acc_test:.4f}')
# Calculate and plot the confusion matrix
plot_confusion_matrix(model.calculate_confusion_matrix(x_test, y_test))
def save_test_data(x_test, y_test, le, model_name):
test_df = | DataFrame() | pandas.DataFrame |
#!/usr/bin/env python3
import pandas as pd
from pandas import json_normalize
import json
import argparse
import logzero
import logging
def get_args():
parser = argparse.ArgumentParser(
description="Convert t-SNE matrix to HTML plot.")
parser.add_argument(
"--tissue-type",
type=str,
help="Tumor tissue type: ['Hematologic Malignancy', 'Solid Tumor', 'Brain Tumor']")
parser.add_argument(
"--debug",
default=False,
action="store_true",
help="Debug the script.")
parser.add_argument(
"--subtype-file",
type=str,
help="Subtype definitions file"
)
parser.add_argument(
"--metadata-file",
type=str,
help="File containing metadata for the samples"
)
parser.add_argument(
"--tsne-file",
type=str,
help="File containing t-SNE matrix for the samples"
)
args = parser.parse_args()
if args.debug:
logzero.loglevel(logging.DEBUG)
else:
logzero.loglevel(logging.INFO)
return args
def label_samples (row):
if pd.isna(row['projects']) or row['projects'] == 'input':
return 'user'
else:
return None
if __name__ == "__main__":
args = get_args()
# Read t-SNE matrix
matrix = pd.read_table(args.tsne_file)
# Trim precision on x/y coordinates
matrix = matrix.round({'t1': 2, 't2': 2})
# Read the metadata file
metadata = pd.read_json(args.metadata_file)
# Convert metadata JSON column 'properties' into a table.
# Then combined with the rest of the metadata in a single dataframe.
prop = json_normalize(metadata['properties'])
metadata = metadata.drop(columns=['properties'])
metadata = pd.concat([metadata, prop], axis=1, sort=False)
combined = matrix.merge(metadata, left_on=['samples','projects'], right_on=['sample_name', 'sj_datasets'], how='left')
# Get subtype group and label information for each disease code
subtypes = pd.read_csv(args.subtype_file)
# Add the subtype information to the dataframe
combined = combined.merge(subtypes, left_on=['sj_diseases'], right_on=['sj_disease'], how="left")
# Rename t-SNE coordinate columns to x & y. Rename subtype labels to group & label
combined = combined.rename(columns={"t1":"x", "t2":"y", "t_SNE group": "group","t_SNE_label": "label"})
combined['highlight'] = combined.apply(lambda row: label_samples(row), axis=1)
# Drop unneeded columns
combined = combined.drop(columns=["id", "name", "folder", "sj_ega_accessions", "sj_access_unit", "data_access_level", "sj_dataset_accessions", "vendable", "file_state", "released", "sj_pmid_accessions", "file_type", "sj_embargo_date", "sj_pipeline_name", "sj_pipeline_version", "sj_pub_accessions", "sj_publication_titles", "projects"])
combined = combined.drop(columns=["sj_disease","sj_long_disease_name_y","diagnosis_group"])
# Copy user submitted sample names to the standard 'sample_name' column
combined['sample_name'] = combined['sample_name'].fillna(combined['samples'])
# Rename duplicated column from merge
combined = combined.rename(columns={"sj_long_disease_name_x": "sj_long_disease_name"})
# Drop unneeded columns
combined = combined.drop(columns=["samples", "classes", "diagnosisNames"])
combined = combined.fillna("unknown")
# Split user samples from reference samples
user_samples = combined.loc[combined.highlight=='user']
samples = combined.loc[combined.highlight!='user']
# Fill any samples that didn't get subtype information with sensible defaults
samples.fillna({'group':'Other Cancer','label':samples['sj_long_disease_name']}, inplace=True)
# Create dataframe with just unique diseases for ProteinPaint
diseases = combined[['sj_diseases','color', 'attr_diagnosis_group', 'group', 'label']]
diseases = diseases.drop_duplicates()
diseases['attr_diagnosis_group'] = | pd.Categorical(diseases['attr_diagnosis_group'], ["Brain Tumor", "Solid Tumor", "Hematologic Malignancy", "Germ Cell Tumor"]) | pandas.Categorical |
# Databricks notebook source
import pandas as pd
import math
import matplotlib.pyplot as plt
import numpy as np
# COMMAND ----------
# MAGIC %md
# MAGIC #REGRESSION MODEL NOTES
# MAGIC ## We Can Conduct a few different versions of this regression model by changing the dependent and independent variables
# MAGIC **Dependent Variable**
# MAGIC We can elect to make the dependent variable round score or tournament score
# MAGIC
# MAGIC *Round Score* - would give us more data points, but could also cause higher variation
# MAGIC
# MAGIC *Tournament Score* - would seem to be the better fit, but we may not have enough data points
# MAGIC
# MAGIC The dependent variable can also refer to tournament score across all tournaments, or for a specific tournament
# MAGIC
# MAGIC **Independent Variables**
# MAGIC
# MAGIC 4 major groups of Independent Variables
# MAGIC
# MAGIC *Greens In Regulation* : Describes how frequently the player makes it to the green at least 2 strokes away from par based on a number of situations. Evaluates a player's skill in the fairways/middle game
# MAGIC
# MAGIC Consists of ['GIR_PCT_FAIRWAY_BUNKER', 'GIR_PCT_FAIRWAY', 'GIR_PCT_OVERALL', 'GIR_PCT_OVER_100', 'GIR_PCT_OVER_200','GIR_PCT_UNDER_100', 'GREEN_PCT_SCRAMBLE_SAND', 'GREEN_PCT_SCRAMBLE_ROUGH']
# MAGIC
# MAGIC *Tee Box*: Describes different elements of a player's driving/tee shots. Evaluates a player's skill off the tee/long game
# MAGIC
# MAGIC Consists of ['TEE_AVG_BALL_SPEED', 'TEE_AVG_DRIVING_DISTANCE', 'TEE_DRIVING_ACCURACY_PCT', 'TEE_AVG_LAUNCH_ANGLE', 'TEE_AVG_LEFT_ROUGH_TENDENCY_PCT', 'TEE_AVG_RIGHT_ROUGH_TENDENCY_PCT', 'TEE_AVG_SPIN_RATE']
# MAGIC
# MAGIC *Putting*: Describes a player's performance on the green. Evaluates a player's putting skill/short game
# MAGIC
# MAGIC Consists of ['PUTTING_AVG_ONE_PUTTS', 'PUTTING_AVG_TWO_PUTTS','PUTTING_AVG_PUTTS','PUTTING_AVG_DIST_BIRDIE_INCH']
# MAGIC
# MAGIC *Performance Based*: Describes a player's performance in terms of previous results and scores. Evaluates a player's consistency and past performances
# MAGIC
# MAGIC Consists of ['Par3Average','Par4Average', 'Par5Average', 'HolesPerBirdie', 'HolesPerBogey','FINISHES_TOP10']
# MAGIC
# MAGIC **Independence Between Variables**
# MAGIC
# MAGIC To avoid creating bias in the regression model, we should avoid using the following highly correlated independent variables together in the same model
# MAGIC
# MAGIC *GIR*: (GIR_PCT_OVERALL: GIR_PCT_OVER_100, GIR_PCT_FAIRWAY)
# MAGIC
# MAGIC *Tee*: (TEE_AVG_BALL_SPEED: TEE_AVG_DRIVING_DISTANCE)
# MAGIC
# MAGIC *Putting*: (PUTTING_AVG_ONE_PUTTS: PUTTING_AVG_TWO_PUTTS : PUTTING_AVG_PUTTS)
# MAGIC
# MAGIC *Performance Based*: (Par4Average: HolesPerBogey)
# COMMAND ----------
# Let's start with the Dependent Variable as Round Score across all tournaments
roundsDf = pd.read_csv("/dbfs/FileStore/karbide/RoundsReg.txt")
playerStats = pd.read_csv("/dbfs/FileStore/karbide/PlayerStatsComplete.txt")
roundsDf.drop(["Unnamed: 0"], axis = 1, inplace = True)
playerStats.drop(["Unnamed: 0"], axis = 1, inplace = True)
# COMMAND ----------
roundScores = roundsDf[["PlayerID","RoundScore"]]
# COMMAND ----------
roundsReg = roundScores.merge(playerStats, how = "left", on = "PlayerID")
# COMMAND ----------
roundsReg.corr()
# none of the variables is highly correlated with RoundScore, but the performance-based ones score the highest
# COMMAND ----------
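# Illustrative addition (an assumption, not from the original notebook): a variance
# inflation factor (VIF) check is another way to quantify the "independence between
# variables" concern noted in the notes above. The candidate columns are taken from the
# predictors used below.
from statsmodels.stats.outliers_influence import variance_inflation_factor

vif_cols = ["Par4Average", "HolesPerBirdie", "PUTTING_AVG_PUTTS", "TEE_AVG_DRIVING_DISTANCE"]
X_vif = roundsReg[vif_cols].dropna()
vif_table = pd.DataFrame({"feature": vif_cols,
                          "VIF": [variance_inflation_factor(X_vif.values, i) for i in range(len(vif_cols))]})
vif_table

# COMMAND ----------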
from sklearn.model_selection import train_test_split
from sklearn import linear_model
from sklearn.metrics import r2_score
# COMMAND ----------
# selecting Independent Variables (X)
X = roundsReg[["Par4Average","HolesPerBirdie","PUTTING_AVG_DIST_BIRDIE_INCH","PUTTING_AVG_PUTTS","TEE_AVG_DRIVING_DISTANCE","TEE_DRIVING_ACCURACY_PCT", "FINISHES_TOP10", "GIR_PCT_OVERALL", "GIR_PCT_FAIRWAY_BUNKER"]]
Y = roundsReg[["RoundScore"]]
# COMMAND ----------
X_train, X_test, Y_train, Y_test = train_test_split(X,Y, test_size = 0.33)
# COMMAND ----------
reg = linear_model.LinearRegression()
reg.fit(pd.DataFrame(X_train),pd.DataFrame(Y_train))
# COMMAND ----------
pred = reg.predict(X_test)
err = pd.Series(Y_test["RoundScore"]) - [p[0]for p in pred]
# COMMAND ----------
display(err.hist(bins=100))
# seems we get some really crazy predictions
# COMMAND ----------
predDf = pd.DataFrame(pred)
predDf.describe()
# COMMAND ----------
Y_test.describe()
# COMMAND ----------
reg.score(pd.DataFrame(X_train), pd.DataFrame(Y_train))
# COMMAND ----------
# This shows the high variance I was worried about. Let's check accuracy
r2_score(Y_test["RoundScore"],pred)
# COMMAND ----------
import statistics as stats
def rmse(errors):
return(pow(stats.mean([pow(e,2) for e in errors]),0.5))
# COMMAND ----------
rmse(err)
# COMMAND ----------
# seems we are way off, let's change the dependent variable to tournament score
# COMMAND ----------
tournamentScore = roundsDf.groupby(["PlayerID","TournamentID"]).agg({"RoundScore":"sum"})
tournamentScore.reset_index(inplace = True)
# since we're doing this across all tournaments, we can drop tournament ID
tournamentScore.drop(["TournamentID"],inplace = True, axis = 1)
# COMMAND ----------
t_Reg = tournamentScore.merge(playerStats, how = "left", on = "PlayerID")
# COMMAND ----------
t_Reg.corr()
# our correlations are still getting stronger, but there is still little that is very strongly correlated
# COMMAND ----------
X = t_Reg[["Par4Average","HolesPerBirdie","PUTTING_AVG_DIST_BIRDIE_INCH","PUTTING_AVG_PUTTS","TEE_AVG_DRIVING_DISTANCE","TEE_DRIVING_ACCURACY_PCT", "FINISHES_TOP10", "GIR_PCT_OVERALL", "GIR_PCT_FAIRWAY_BUNKER"]]
Y = t_Reg[["RoundScore"]]
X_train, X_test, Y_train, Y_test = train_test_split(X,Y, test_size = 0.2)
reg = linear_model.LinearRegression()
reg.fit(pd.DataFrame(X_train),pd.DataFrame(Y_train))
# COMMAND ----------
pred = reg.predict(X_test)
err = pd.Series(Y_test["RoundScore"]) - [p[0]for p in pred]
# COMMAND ----------
display(err.hist(bins=100))
# COMMAND ----------
predDf = pd.DataFrame(pred)
print(predDf.describe())
print(Y_test.describe())
# COMMAND ----------
print ("R2 Train")
print(reg.score(pd.DataFrame(X_train), pd.DataFrame(Y_train)))
print("R2 Test")
print(r2_score(Y_test["RoundScore"],pred))
print("RMSE")
print(rmse(err))
# COMMAND ----------
def linearReg(ind,dep,split):
X_train, X_test, Y_train, Y_test = train_test_split(ind,dep, test_size = split)
reg = linear_model.LinearRegression()
reg.fit(pd.DataFrame(X_train),pd.DataFrame(Y_train))
pred = reg.predict(X_test)
err = pd.Series(Y_test["RoundScore"]) - [p[0]for p in pred]
print ("R2 Train")
print(reg.score(pd.DataFrame(X_train), | pd.DataFrame(Y_train) | pandas.DataFrame |
import copy
import csv
import io
import os
from pathlib import Path
import socket
import tempfile
import threading
import unittest
import pandas as pd
import pyarrow as pa
from pyarrow import csv as arrow_csv
from cleanup import cleanup_on_shutdown, directories_to_delete
import main
from proto.aiengine.v1 import aiengine_pb2
from tests import common
import train
class TrainingLoopTests(unittest.TestCase):
ALGORITHM = os.environ.get("ALGORITHM")
IPC_PATH = Path("/", "tmp", "spice_ai_test_loop.sock")
def setUp(self):
# Preventing tensorflow verbose initialization
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
if self.ALGORITHM is None:
self.ALGORITHM = "dql" # pylint: disable=invalid-name
self.aiengine = main.AIEngine()
self.trader_init_req = common.get_init_from_json(
init_data_path="../../test/assets/aiengine/api/trader_init.json",
pod_name="trader",
)
with open("../../test/assets/data/csv/trader.csv", "r", encoding="utf8") as trader_data:
self.trader_data_csv = trader_data.read()
self.episode_results = []
self.original_post_episode_result = train.post_episode_result
train.post_episode_result = (
lambda request_url, episode_data: self.episode_results.append(
{"request_url": request_url, "episode_data": episode_data}
)
)
self.original_end_of_episode = train.end_of_episode
self.temp_dir = tempfile.mkdtemp(prefix='spice_test_')
directories_to_delete.append(self.temp_dir)
def tearDown(self):
train.post_episode_result = self.original_post_episode_result
train.end_of_episode = self.original_end_of_episode
cleanup_on_shutdown()
def init(
self,
init_req: aiengine_pb2.InitRequest,
expected_error: bool = False,
expected_result: str = "ok",
):
resp = self.aiengine.Init(init_req, None)
self.assertEqual(resp.error, expected_error)
self.assertEqual(resp.result, expected_result)
def add_data(self, pod_name: str, csv_data: str, should_error=False):
table = arrow_csv.read_csv(io.BytesIO(csv_data.encode()))
ready_barrier = threading.Barrier(2, timeout=2)
ipc_thread = threading.Thread(target=self.ipc_server, args=(self.IPC_PATH, table, ready_barrier,))
ipc_thread.start()
ready_barrier.wait()
resp = self.aiengine.AddData(
aiengine_pb2.AddDataRequest(pod=pod_name, unix_socket=str(self.IPC_PATH)), None
)
ipc_thread.join()
if not should_error:
self.assertFalse(resp.error)
return resp
@staticmethod
def ipc_server(ipc_path: Path, table: pa.Table, ready_barrier: threading.Barrier):
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as arrow_socket:
if ipc_path.exists():
ipc_path.unlink()
try:
arrow_socket.settimeout(2)
arrow_socket.bind(str(ipc_path).encode())
arrow_socket.listen(1)
ready_barrier.wait()
connection, _address = arrow_socket.accept()
with connection:
writer = pa.ipc.RecordBatchStreamWriter(connection.makefile(mode="wb"), table.schema)
writer.write_table(table)
except OSError as error:
print(error)
arrow_socket.shutdown(socket.SHUT_RDWR)
ipc_path.unlink()
def start_training(
self, pod_name: str, flight: str = None, number_episodes: int = None, epoch_time: int = None,
expected_error: bool = False, expected_result: str = "started_training"):
train_req = aiengine_pb2.StartTrainingRequest(
pod=pod_name,
number_episodes=number_episodes,
flight=flight,
epoch_time=epoch_time,
learning_algorithm=self.ALGORITHM,
training_data_dir=self.temp_dir)
resp = self.aiengine.StartTraining(train_req, None)
self.assertEqual(resp.error, expected_error)
self.assertEqual(resp.result, expected_result)
def wait_for_training(self):
self.assertIsNotNone(main.Dispatch.TRAINING_THREAD)
main.Dispatch.TRAINING_THREAD.join()
def inference(self, pod_name: str, tag: str, assertion_on_response=None):
resp = self.aiengine.GetInference(
aiengine_pb2.InferenceRequest(pod=pod_name, tag=tag), None
)
self.assertFalse(resp.response.error)
self.assertEqual(resp.tag, tag)
if assertion_on_response is not None:
assertion_on_response(resp)
def validate_episode_data(
self, pod_name, flight, number_episodes, num_actions, episode_results
):
self.assertEqual(len(episode_results), number_episodes)
index = episode_results[0]["episode_data"]["episode"]
for episode_result in episode_results:
episode_data = episode_result["episode_data"]
self.assertEqual(
episode_result["request_url"],
f"http://localhost:8000/api/v0.1/pods/{pod_name}/training_runs/{flight}/episodes",
)
self.assertEqual(episode_data["episode"], index)
self.assertTrue(episode_data["start"])
self.assertTrue(episode_data["end"])
self.assertTrue(episode_data["score"])
actions_count = 0
for action_name in episode_data["actions_taken"]:
actions_count += episode_data["actions_taken"][action_name]
self.assertEqual(actions_count, num_actions)
index += 1
def test_train_inference_loop(self):
# Step 1, init the pod
self.init(self.trader_init_req)
# Step 2, load the csv data
self.add_data("trader", self.trader_data_csv)
flight = "1"
number_episodes = 5
# Step 3, train
self.start_training("trader", flight, number_episodes)
self.wait_for_training()
# Step 4, inference
self.inference(
"trader",
"latest",
lambda response: self.assertNotEqual(response.confidence, 0.0),
)
# Validate the episode data
self.validate_episode_data(
"trader",
flight,
number_episodes,
num_actions=50,
episode_results=self.episode_results,
)
def test_train_inference_loop_train_different_epoch(self):
# Step 1, init the pod
self.init(self.trader_init_req)
# Step 2, load the csv data
self.add_data("trader", self.trader_data_csv)
flight = "1"
number_episodes = 5
# Step 3, train
self.start_training("trader", flight, number_episodes, 1626697490)
self.wait_for_training()
# Step 4, inference
self.inference(
"trader",
"latest",
lambda response: self.assertNotEqual(response.confidence, 0.0),
)
# Validate the episode data
self.validate_episode_data(
"trader",
flight,
number_episodes,
num_actions=49,
episode_results=self.episode_results,
)
def test_train_gap_in_data(self):
with open("./tests/assets/csv/training_loop_gap_0.csv", "r", encoding="utf8") as data:
gap_data_0 = data.read()
with open("./tests/assets/csv/training_loop_gap_1.csv", "r", encoding="utf-8") as data:
gap_data_1 = data.read()
self.init(self.trader_init_req)
self.add_data("trader", gap_data_0)
self.add_data("trader", gap_data_1)
flight = "1"
number_episodes = 5
self.start_training("trader", flight, number_episodes)
self.wait_for_training()
self.validate_episode_data(
"trader",
flight,
number_episodes,
num_actions=50,
episode_results=self.episode_results,
)
gap_start = pd.to_datetime(1626697640, unit="s")
gap_end = pd.to_datetime(1626697860, unit="s")
table = main.data_managers["trader"].massive_table_sparse
filled_table = main.data_managers["trader"]._fill_table(table) # pylint: disable=protected-access
price = list(filled_table[gap_start:gap_start].coinbase_btcusd_close)[-1]
# Validate the forward filling is working.
current_time = gap_start
while current_time < gap_end:
next_price = list(filled_table[current_time:current_time].coinbase_btcusd_close)[
-1
]
self.assertEqual(price, next_price)
price = next_price
current_time += pd.to_timedelta(self.trader_init_req.granularity, unit="s")
def test_data_added_after_training_starts(self):
with open("./tests/assets/csv/training_loop_gap_0.csv", "r", encoding="utf-8") as data:
gap_data_0 = data.read()
with open("./tests/assets/csv/training_loop_gap_1.csv", "r", encoding="utf-8") as data:
gap_data_1 = data.read()
self.init(self.trader_init_req)
self.add_data("trader", gap_data_0)
flight = "1"
number_episodes = 5
self.start_training("trader", flight, number_episodes)
post_data_lock = threading.Lock()
episode_5_lock = threading.Lock()
episode_5_lock.acquire() # pylint: disable=consider-using-with
def release_lock_on_episode_5(episode: int):
if episode == 5 and episode_5_lock.locked():
episode_5_lock.release()
post_data_lock.acquire() # pylint: disable=consider-using-with
train.end_of_episode = release_lock_on_episode_5
# wait for episode 5
post_data_lock.acquire() # pylint: disable=consider-using-with
episode_5_lock.acquire() # pylint: disable=consider-using-with
print("Posting gap_data_1")
self.add_data("trader", gap_data_1)
post_data_lock.release()
self.wait_for_training()
episode_5_lock.release()
post_data_lock.release()
self.validate_episode_data(
"trader",
flight,
number_episodes,
num_actions=10,
episode_results=self.episode_results,
)
def test_epoch_earlier_than_data(self):
self.init(self.trader_init_req)
self.add_data("trader", self.trader_data_csv)
self.start_training("trader", "1", 10, 1626697400, expected_error=True, expected_result="epoch_time_invalid")
def test_epoch_offset_from_data(self):
self.init(self.trader_init_req)
self.add_data("trader", self.trader_data_csv)
self.start_training("trader", "1", 1, 1626697485, expected_error=False, expected_result="started_training")
self.wait_for_training()
def test_epoch_after_latest_data(self):
self.init(self.trader_init_req)
self.add_data("trader", self.trader_data_csv)
self.start_training(
"trader", "1", 10, 1626699240, expected_error=True, expected_result="not_enough_data_for_training")
def test_not_enough_data_for_training_no_data(self):
self.init(self.trader_init_req)
self.start_training("trader", "1", 10, expected_error=True, expected_result="not_enough_data_for_training")
def test_not_enough_data_for_training_late_epoch(self):
self.init(self.trader_init_req)
self.add_data("trader", self.trader_data_csv)
self.start_training(
"trader", "1", 10, epoch_time=1626698020,
expected_error=True, expected_result="not_enough_data_for_training")
def test_invalid_reward_handled_gracefully(self):
trader_init = copy.deepcopy(self.trader_init_req)
trader_init.actions["buy"] = "foo"
self.init(
trader_init,
expected_error=True,
expected_result="invalid_reward_function",
)
def test_no_rewards_handled_gracefully(self):
trader_init = copy.deepcopy(self.trader_init_req)
trader_init.actions.clear()
self.init(
trader_init,
expected_error=True,
expected_result="missing_actions",
)
def test_no_fields_handled_gracefully(self):
trader_init = copy.deepcopy(self.trader_init_req)
trader_init.fields.clear()
self.init(
trader_init,
expected_error=True,
expected_result="missing_fields",
)
def test_invalid_reward_post_error(self):
trader_init = copy.deepcopy(self.trader_init_req)
trader_init.actions["buy"] = "reward = foo"
trader_init.actions["sell"] = "reward = foo"
trader_init.actions["hold"] = "reward = foo"
self.init(trader_init)
self.add_data("trader", self.trader_data_csv)
flight = "1"
number_episodes = 5
self.start_training("trader", flight, number_episodes, 1626697490)
self.wait_for_training()
self.assertEqual(len(self.episode_results), 1)
error_data = self.episode_results[0]["episode_data"]
self.assertEqual(error_data["error"], "invalid_reward_function")
self.assertEqual(
error_data["error_message"], """NameError("name 'foo' is not defined")"""
)
def test_unsafe_reward_error(self):
trader_init = copy.deepcopy(self.trader_init_req)
unsafe_action = "open('/tmp/FILE','w').write('this is unsafe!'); reward = 1"
trader_init.actions["buy"] = unsafe_action
trader_init.actions["sell"] = unsafe_action
trader_init.actions["hold"] = unsafe_action
self.init(trader_init)
self.add_data("trader", self.trader_data_csv)
flight = "1"
number_episodes = 5
self.start_training("trader", flight, number_episodes, 1626697490)
self.wait_for_training()
self.assertEqual(len(self.episode_results), 1)
error_data = self.episode_results[0]["episode_data"]
self.assertEqual(error_data["error"], "invalid_reward_function")
self.assertEqual(
error_data["error_message"], """NameError("name 'open' is not defined")"""
)
def test_invalid_law_post_error(self):
trader_init = copy.deepcopy(self.trader_init_req)
trader_init.laws[0] = "can I do this?"
self.init(trader_init)
self.add_data("trader", self.trader_data_csv)
flight = "1"
number_episodes = 5
self.start_training("trader", flight, number_episodes, 1626697490)
self.wait_for_training()
self.assertEqual(len(self.episode_results), 1)
error_data = self.episode_results[0]["episode_data"]
self.assertEqual(error_data["error"], "invalid_law_expression")
self.assertEqual(
error_data["error_message"],
"""SyntaxError('invalid syntax', ('<string>', 1, 5, 'can I do this?'))""",
)
def test_invalid_datasource_action_post_error(self):
trader_init = copy.deepcopy(self.trader_init_req)
trader_init.datasources[0].actions[
"buy"
] = "local_portfolio_usd_balance1 -= coinbase_btcusd_close\nlocal_portfolio_btc_balance += 1"
self.init(trader_init)
self.add_data("trader", self.trader_data_csv)
flight = "1"
number_episodes = 5
self.start_training("trader", flight, number_episodes, 1626697490)
self.wait_for_training()
self.assertEqual(len(self.episode_results), 1)
error_data = self.episode_results[0]["episode_data"]
self.assertEqual(error_data["error"], "invalid_datasource_action_expression")
self.assertEqual(
error_data["error_message"],
"""NameError("name 'local_portfolio_usd_balance1' is not defined")""",
)
def test_epoch_is_inferred_if_absent(self):
trader_init = copy.deepcopy(self.trader_init_req)
trader_init.epoch_time = 0
trader_init.period = 120
self.init(trader_init)
now_unix_seconds = (
pd.Timestamp.now() - pd.Timestamp("1970-01-01")
) // pd.Timedelta("1s")
csv_data = io.StringIO()
headers = [
"time",
"local_portfolio_usd_balance",
"local_portfolio_btc_balance",
"coinbase_btcusd_close",
]
writer = csv.writer(csv_data)
writer.writerow(headers)
for unix_seconds in range(now_unix_seconds - 70, now_unix_seconds - 10, 10):
row = [unix_seconds, None, None, 123]
writer.writerow(row)
self.add_data("trader", csv_data.getvalue())
flight = "1"
number_episodes = 5
self.start_training("trader", flight, number_episodes, expected_error=False)
# Counts will be unstable due to timing. The important thing is that we launch training with enough data.
self.wait_for_training()
def test_add_data_with_different_fields_fails(self):
trader_init = copy.deepcopy(self.trader_init_req)
trader_init.epoch_time = 0
trader_init.period = 120
self.init(self.trader_init_req)
now_unix_seconds = (
pd.Timestamp.now() - | pd.Timestamp("1970-01-01") | pandas.Timestamp |
# -*- coding: utf-8 -*-
# This file as well as the whole tsfresh package are licenced under the MIT licence (see the LICENCE.txt)
# <NAME> (<EMAIL>), Blue Yonder Gmbh, 2016
import warnings
from unittest import TestCase
import pandas as pd
from tsfresh.utilities import dataframe_functions
import numpy as np
import six
class NormalizeTestCase(TestCase):
def test_with_dictionaries_one_row(self):
test_df = pd.DataFrame([{"value": 1, "id": "id_1"}])
test_dict = {"a": test_df, "b": test_df}
# A kind is not allowed with dicts
self.assertRaises(ValueError, dataframe_functions.normalize_input_to_internal_representation, test_dict,
"id", None, "a kind", None)
# The value must be present
self.assertRaises(ValueError, dataframe_functions.normalize_input_to_internal_representation, test_dict,
"id", None, None, "something other")
# Nothing should have changed compared to the input data
result_dict, column_id, column_value = \
dataframe_functions.normalize_input_to_internal_representation(test_dict, "id", None, None, "value")
self.assertEqual(column_value, "value")
self.assertEqual(column_id, "id")
six.assertCountEqual(self, list(test_dict.keys()), list(result_dict.keys()))
self.assertEqual(result_dict["a"].iloc[0].to_dict(), {"value": 1, "id": "id_1"})
# The algo should choose the correct value column
result_dict, column_id, column_value = \
dataframe_functions.normalize_input_to_internal_representation(test_dict, "id", None, None, None)
self.assertEqual(column_value, "value")
self.assertEqual(column_id, "id")
def test_with_dictionaries_two_rows(self):
test_df = pd.DataFrame([{"value": 2, "sort": 2, "id": "id_1"},
{"value": 1, "sort": 1, "id": "id_1"}])
test_dict = {"a": test_df, "b": test_df}
# If there are more than one column, the algorithm can not choose the correct column
self.assertRaises(ValueError, dataframe_functions.normalize_input_to_internal_representation, test_dict,
"id", None, None, None)
# Sorting should work
result_dict, column_id, column_value = \
dataframe_functions.normalize_input_to_internal_representation(test_dict, "id", "sort", None, "value")
self.assertEqual(column_value, "value")
self.assertEqual(column_id, "id")
# Assert sorted and without sort column
self.assertEqual(result_dict["a"].iloc[0].to_dict(), {"value": 1, "id": "id_1"})
self.assertEqual(result_dict["a"].iloc[1].to_dict(), {"value": 2, "id": "id_1"})
# Assert the algo has found the correct column
result_dict, column_id, column_value = \
dataframe_functions.normalize_input_to_internal_representation(test_dict, "id", "sort", None, None)
self.assertEqual(column_value, "value")
self.assertEqual(column_id, "id")
def test_with_dictionaries_two_rows_sorted(self):
test_df = pd.DataFrame([{"value": 2, "id": "id_1"},
{"value": 1, "id": "id_1"}])
test_dict = {"a": test_df, "b": test_df}
# Pass the id
result_dict, column_id, column_value = \
dataframe_functions.normalize_input_to_internal_representation(test_dict, "id", None, None, "value")
self.assertEqual(column_value, "value")
self.assertEqual(column_id, "id")
self.assertEqual(result_dict["a"].iloc[0].to_dict(), {"value": 2, "id": "id_1"})
# The algo should have found the correct value column
result_dict, column_id, column_value = \
dataframe_functions.normalize_input_to_internal_representation(test_dict, "id", None, None, None)
self.assertEqual(column_value, "value")
self.assertEqual(column_id, "id")
def test_with_df(self):
# give everything
test_df = pd.DataFrame([{"id": 0, "kind": "a", "value": 3, "sort": 1}])
result_dict, column_id, column_value = \
dataframe_functions.normalize_input_to_internal_representation(test_df, "id", "sort", "kind", "value")
self.assertEqual(column_id, "id")
self.assertEqual(column_value, "value")
self.assertIn("a", result_dict)
six.assertCountEqual(self, list(result_dict["a"].columns), ["id", "value"])
self.assertEqual(list(result_dict["a"]["value"]), [3])
self.assertEqual(list(result_dict["a"]["id"]), [0])
# give no kind
test_df = pd.DataFrame([{"id": 0, "value": 3, "sort": 1}])
result_dict, column_id, column_value = \
dataframe_functions.normalize_input_to_internal_representation(test_df, "id", "sort", None, "value")
self.assertEqual(column_id, "id")
self.assertEqual(column_value, "value")
self.assertIn("value", result_dict)
six.assertCountEqual(self, list(result_dict["value"].columns), ["id", "value"])
self.assertEqual(list(result_dict["value"]["value"]), [3])
self.assertEqual(list(result_dict["value"]["id"]), [0])
# Let the function find the values
test_df = pd.DataFrame([{"id": 0, "a": 3, "b": 5, "sort": 1}])
result_dict, column_id, column_value = \
dataframe_functions.normalize_input_to_internal_representation(test_df, "id", "sort", None, None)
self.assertEqual(column_id, "id")
self.assertEqual(column_value, "_value")
self.assertIn("a", result_dict)
self.assertIn("b", result_dict)
six.assertCountEqual(self, list(result_dict["a"].columns), ["_value", "id"])
self.assertEqual(list(result_dict["a"]["_value"]), [3])
self.assertEqual(list(result_dict["a"]["id"]), [0])
six.assertCountEqual(self, list(result_dict["b"].columns), ["_value", "id"])
self.assertEqual(list(result_dict["b"]["_value"]), [5])
self.assertEqual(list(result_dict["b"]["id"]), [0])
def test_with_wrong_input(self):
test_df = pd.DataFrame([{"id": 0, "kind": "a", "value": 3, "sort": np.NaN}])
self.assertRaises(ValueError, dataframe_functions.normalize_input_to_internal_representation, test_df,
"id", "sort", "kind", "value")
test_df = pd.DataFrame([{"id": 0, "kind": "a", "value": 3, "sort": 1}])
self.assertRaises(AttributeError, dataframe_functions.normalize_input_to_internal_representation, test_df,
"strange_id", "sort", "kind", "value")
test_df = pd.DataFrame([{"id": np.NaN, "kind": "a", "value": 3, "sort": 1}])
self.assertRaises(ValueError, dataframe_functions.normalize_input_to_internal_representation, test_df,
"id", "sort", "kind", "value")
test_df = pd.DataFrame([{"id": 0}])
self.assertRaises(ValueError, dataframe_functions.normalize_input_to_internal_representation, test_df,
"id", None, None, None)
test_df = pd.DataFrame([{"id": 2}, {"id": 1}])
test_dict = {"a": test_df, "b": test_df}
# If there are more than one column, the algorithm can not choose the correct column
self.assertRaises(ValueError, dataframe_functions.normalize_input_to_internal_representation, test_dict,
"id", None, None, None)
test_dict = {"a": pd.DataFrame([{"id": 2, "value_a": 3}, {"id": 1, "value_a": 4}]),
"b": pd.DataFrame([{"id": 2}, {"id": 1}])}
# If there are more than one column, the algorithm can not choose the correct column
self.assertRaises(ValueError, dataframe_functions.normalize_input_to_internal_representation, test_dict,
"id", None, None, None)
test_df = pd.DataFrame([{"id": 0, "value": np.NaN}])
self.assertRaises(ValueError, dataframe_functions.normalize_input_to_internal_representation, test_df,
"id", None, None, "value")
test_df = pd.DataFrame([{"id": 0, "value": np.NaN}])
self.assertRaises(ValueError, dataframe_functions.normalize_input_to_internal_representation, test_df,
None, None, None, "value")
class RollingTestCase(TestCase):
def test_with_wrong_input(self):
test_df = pd.DataFrame({"id": [0, 0], "kind": ["a", "b"], "value": [3, 3], "sort": [np.NaN, np.NaN]})
self.assertRaises(ValueError, dataframe_functions.roll_time_series,
df_or_dict=test_df, column_id="id",
column_sort="sort", column_kind="kind",
rolling_direction=1)
test_df = pd.DataFrame({"id": [0, 0], "kind": ["a", "b"], "value": [3, 3], "sort": [1, 1]})
self.assertRaises(AttributeError, dataframe_functions.roll_time_series,
df_or_dict=test_df, column_id="strange_id",
column_sort="sort", column_kind="kind",
rolling_direction=1)
test_df = {"a": pd.DataFrame([{"id": 0}])}
self.assertRaises(ValueError, dataframe_functions.roll_time_series,
df_or_dict=test_df, column_id="id",
column_sort=None, column_kind="kind",
rolling_direction=1)
self.assertRaises(ValueError, dataframe_functions.roll_time_series,
df_or_dict=test_df, column_id=None,
column_sort=None, column_kind="kind",
rolling_direction=1)
self.assertRaises(ValueError, dataframe_functions.roll_time_series,
df_or_dict=test_df, column_id="id",
column_sort=None, column_kind=None,
rolling_direction=0)
self.assertRaises(ValueError, dataframe_functions.roll_time_series,
df_or_dict=test_df, column_id=None,
column_sort=None, column_kind=None,
rolling_direction=0)
def test_assert_single_row(self):
test_df = pd.DataFrame([{"id": np.NaN, "kind": "a", "value": 3, "sort": 1}])
self.assertRaises(ValueError, dataframe_functions.roll_time_series,
df_or_dict=test_df, column_id="id",
column_sort="sort", column_kind="kind",
rolling_direction=1)
def test_positive_rolling(self):
first_class = pd.DataFrame({"a": [1, 2, 3, 4], "b": [5, 6, 7, 8], "time": range(4)})
second_class = pd.DataFrame({"a": [10, 11], "b": [12, 13], "time": range(20, 22)})
first_class["id"] = 1
second_class["id"] = 2
df_full = pd.concat([first_class, second_class], ignore_index=True)
correct_indices = (["id=1, shift=3"] * 1 +
["id=1, shift=2"] * 2 +
["id=1, shift=1"] * 3 +
["id=2, shift=1"] * 1 +
["id=1, shift=0"] * 4 +
["id=2, shift=0"] * 2)
correct_values_a = [1, 1, 2, 1, 2, 3, 10, 1, 2, 3, 4, 10, 11]
correct_values_b = [5, 5, 6, 5, 6, 7, 12, 5, 6, 7, 8, 12, 13]
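# With rolling_direction=1, shift=k is the sub-series made of the first (len - k)
# observations of each id, so each expected frame above grows by one row per shift.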
df = dataframe_functions.roll_time_series(df_full, column_id="id", column_sort="time",
column_kind=None, rolling_direction=1)
self.assertListEqual(list(df["id"]), correct_indices)
self.assertListEqual(list(df["a"].values), correct_values_a)
self.assertListEqual(list(df["b"].values), correct_values_b)
df = dataframe_functions.roll_time_series(df_full, column_id="id", column_sort="time",
column_kind=None, rolling_direction=1,
maximum_number_of_timeshifts=None)
self.assertListEqual(list(df["id"]), correct_indices)
self.assertListEqual(list(df["a"].values), correct_values_a)
self.assertListEqual(list(df["b"].values), correct_values_b)
df = dataframe_functions.roll_time_series(df_full, column_id="id", column_sort="time",
column_kind=None, rolling_direction=1,
maximum_number_of_timeshifts=1)
self.assertListEqual(list(df["id"]), correct_indices[3:])
self.assertListEqual(list(df["a"].values), correct_values_a[3:])
self.assertListEqual(list(df["b"].values), correct_values_b[3:])
df = dataframe_functions.roll_time_series(df_full, column_id="id", column_sort="time",
column_kind=None, rolling_direction=1,
maximum_number_of_timeshifts=2)
self.assertListEqual(list(df["id"]), correct_indices[1:])
self.assertListEqual(list(df["a"].values), correct_values_a[1:])
self.assertListEqual(list(df["b"].values), correct_values_b[1:])
df = dataframe_functions.roll_time_series(df_full, column_id="id", column_sort="time",
column_kind=None, rolling_direction=1,
maximum_number_of_timeshifts=4)
self.assertListEqual(list(df["id"]), correct_indices[:])
self.assertListEqual(list(df["a"].values), correct_values_a[:])
self.assertListEqual(list(df["b"].values), correct_values_b[:])
def test_negative_rolling(self):
first_class = pd.DataFrame({"a": [1, 2, 3, 4], "b": [5, 6, 7, 8], "time": range(4)})
second_class = pd.DataFrame({"a": [10, 11], "b": [12, 13], "time": range(20, 22)})
first_class["id"] = 1
second_class["id"] = 2
df_full = | pd.concat([first_class, second_class], ignore_index=True) | pandas.concat |
""" Este script extrae información de los campos del HTML
del cvlac a partir de una base de datos inicial de los perfiles
"""
# Import libraries/modules
import pandas as pd
import numpy as np
import requests
from bs4 import BeautifulSoup
from lxml import html
import scrapy
import time
# Extract metadata from the seed page
def pag_principal():
n = 'todas'
link = 'https://minciencias.gov.co/convocatorias/' + n
#requests.packages.urllib3.disable_warnings()
encabezados = {
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36'
}
resp = requests.get(link, headers=encabezados, verify=False)
resp = resp.text
#soup = get_Soup('https://minciencias.gov.co/convocatorias/todas')
parser = html.fromstring(resp)
return parser
# Title
def titulo(parser):
titulo = parser.xpath('//table[@class="views-table cols-5"]/tbody/tr/td[@class="views-field views-field-title"]/a/text()')
return titulo
# Description
def descripcion(parser):
descripcion = []
for x in range(0,6):
#descrip = parser.xpath('//table/tbody/tr[' + str(x) + ']/td[' + str(x) + ']/text()')
descrip = parser.xpath('//table/tbody/tr['+str(x)+']/td[3]/text()')
descripcion.append(descrip)
decrip = []
for x in descripcion[1:6]:
des = x[0].strip()
decrip.append(des)
return decrip
# Budget
def cuantia(parser):
cuantia = parser.xpath('//td[@class="views-field views-field-field-cuantia"]/text()')
presupuesto = []
for x in cuantia:
x = x.strip()
presupuesto.append(x)
return presupuesto
# Opening date
def fecha_apertura(parser):
fecha_apertura = parser.xpath('//table/tbody/tr//td[@class="views-field views-field-field-fecha-de-apertura"]/span/text()')
fe_aper = []
for x in fecha_apertura:
x = x.strip()
if x != '':
fe_aper.append(x.strip())
if len(fe_aper) < 5:
fecha_apertura = parser.xpath('//table/tbody/tr//td[@class="views-field views-field-field-fecha-de-apertura"]//text()')
fe_aper = []
for x in fecha_apertura:
x = x.strip()
if x != '':
fe_aper.append(x.strip())
if len(fe_aper) < 5:
fe_aper = []
for x in range(0,6):
fecha_apertura = parser.xpath('//table/tbody/tr[' + str(x) + ']/td[5]/text()')
if len(fecha_apertura) == 0:
x = ''
else:
for x in fecha_apertura:
x = x.strip()
#if x != '':
fe_aper.append(x.strip())
return fe_aper
# Links for vertical scraping
def links_vertical(link):
encabezados = {
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36'
}
resp = requests.get(link, headers=encabezados, verify=False)
resp = resp.text
response = scrapy.Selector(text=resp)
final = response.xpath('//td[@class="views-field views-field-title"]/a/@href').getall()
link = 'https://minciencias.gov.co'
links = []
for x in final:
if x.startswith('http://'):
links.append(x)
elif x.startswith('https://'):
links.append(x)
else:
links.append(link+x)
return links
# Extract the last page number
def ult_page(url):
encabezados = {
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36'
}
resp = requests.get(url, headers=encabezados, verify=False)
resp = resp.text
response = scrapy.Selector(text=resp)
ult = response.xpath('//li[@class="pager-last last"]/a/@href').getall()
ult2 = ult[0].split('=')[1]
return ult2
### Vertical
def pag_vertical(link):
try:
encabezados = {
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36'
}
resp = requests.get(link, headers=encabezados, verify=False)
resp = resp.text
#soup = get_Soup('https://minciencias.gov.co/convocatorias/todas')
parser = html.fromstring(resp)
except:
parser = 'http://www.rutanmedellin.org/es/actualidad/noticias/item/abierta-convocatoria-para-solucionar-retos-energeticos-empresariales'
return parser
# Extract objective
def objetivo(parser):
try:
objetivo = parser.xpath('//div[@class="field-items"][2]/div[@class="field-item even"]/text()')
objetivo_pro = objetivo[0].strip()
except IndexError:
objetivo_pro = ''
except AttributeError:
objetivo_pro = ''
return objetivo_pro
# Extract target audience
def publico_objetivo(parser):
try:
diri_a = parser.xpath('//div[@class="body2-convocatorias"]//div[@class="field-item even"]//text()')
n=0
dirigido_a = ''
for elemento in diri_a:
elemento = elemento.strip()
if elemento != '':
dirigido_a = dirigido_a + ' ' + elemento
n+=1
dirigido_a.strip()
except AttributeError:
dirigido_a = ''
return dirigido_a
# Extract the status of the call
def estado(parser):
try:
estado = parser.xpath('//div[@class="sub-sub-panel panel-state-tex"]/p/text()')
estado = estado[0] + ' ' + estado[1]
except IndexError:
estado = ''
except AttributeError:
estado = ''
return estado
# Extract closing date
def fechas_cierre(parser):
try:
marcas = parser.xpath('//tr/td[@class="views-field views-field-field-numero"]/text()')
estados = []
fecha2 = ''
for i in marcas:
estados.append(i.strip())
for x in range(len(estados)):
marca = parser.xpath('//tr/td[@class="views-field views-field-field-numero"][1]/text()')[x]
if marca.strip() in estados:
fecha = parser.xpath('//tr/td[@class="views-field views-field-body"]/text()')[x]
if marca.strip() == 'Cierre':
fecha2 = fecha.strip()
break
else:
fecha2 = ''
except IndexError:
fecha2 = ''
except AttributeError:
fecha2 = ''
return fecha2.strip()
# Extract preliminary results date
def fechas_resultados_preliminares(parser):
try:
marcas = parser.xpath('//tr/td[@class="views-field views-field-field-numero"]/text()')
estados = []
fecha2 = ''
for i in marcas:
estados.append(i.strip())
for x in range(len(estados)):
marca = parser.xpath('//tr/td[@class="views-field views-field-field-numero"][1]/text()')[x]
if marca.strip() in estados:
fecha = parser.xpath('//tr/td[@class="views-field views-field-body"]/text()')[x]
if marca.strip() == 'Publicación de resultados preliminares':
fecha2 = fecha.strip()
break
else:
fecha2 = ''
except IndexError:
fecha2 = ''
except AttributeError:
fecha2 = ''
return fecha2.strip()
# Extract publication date of final results
def fechas_publicacion_resultados_definitivos(parser):
try:
marcas = parser.xpath('//tr/td[@class="views-field views-field-field-numero"]/text()')
estados = []
fecha2 = ''
for i in marcas:
estados.append(i.strip())
for x in range(len(estados)):
marca = parser.xpath('//tr/td[@class="views-field views-field-field-numero"][1]/text()')[x]
if marca.strip() in estados:
fecha = parser.xpath('//tr/td[@class="views-field views-field-body"]/text()')[x]
if marca.strip() == 'Publicación de resultados definitivos' or marca.strip() == 'Publicación de resultados definitivos':
fecha2 = fecha.strip()
break
else:
fecha2 = ''
except IndexError:
fecha2 = ''
except AttributeError:
fecha2 = ''
return fecha2.strip()
# Extract PDF links
def links_pdf(link):
encabezados = {
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36'
}
resp = requests.get(link, headers=encabezados, verify=False)
resp = resp.text
response = scrapy.Selector(text=resp)
links = response.xpath('//span[@class="file"]/a/@href').getall()
urls = ''
for link in links:
urls = urls + ', ' + link
urls = urls[2::]
return urls
# Extract parser for the horizontal (paginated) pages
def pag_horizontal(link):
encabezados = {
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36'
}
resp = requests.get(link, headers=encabezados, verify=False)
resp = resp.text
#soup = get_Soup('https://minciencias.gov.co/convocatorias/todas')
parser = html.fromstring(resp)
return parser
"""
Generates the integrating scraping function for Colciencias (Colombia)
"""
# Integrating function
def colombia():
# Main page
# Setup
colombia = pd.DataFrame()
parser = pag_principal()
# Extraction from the main page
tit = titulo(parser)
desc = descripcion(parser)
cuant = cuantia(parser)
aper = fecha_apertura(parser)
links = links_vertical('https://minciencias.gov.co/convocatorias/todas')
# Vertical: first page
obj = []
pub_objetivo = []
est= []
fe_cierre = []
fe_preliminares = []
fe_definitivos = []
pdf = []
for link in links:
parser = pag_vertical(link)
obj.append(objetivo(parser))
pub_objetivo.append(publico_objetivo(parser))
est.append(estado(parser))
fe_cierre.append(fechas_cierre(parser))
fe_preliminares.append(fechas_resultados_preliminares(parser))
fe_definitivos.append(fechas_publicacion_resultados_definitivos(parser))
pdf.append(links_pdf(link))
# CSV
colombia['Título'] = tit
colombia['Descripción'] = desc
colombia['Objetivo'] = obj
colombia['Cuantia'] = cuant
colombia['Fecha Apertura'] = aper
colombia['Fecha Cierre'] = fe_cierre
colombia['Fecha Resultados Preliminares'] = fe_preliminares
colombia['Fecha Publicación Resultados Definitivos'] = fe_definitivos
colombia['Link'] = links
colombia['Público Objetivo'] = pub_objetivo
colombia['Estado de la Convocatoria'] = est
colombia['Links pdf'] = pdf
#Horizontal
ult = ult_page('https://minciencias.gov.co/convocatorias/todas?page=1')
for pag in range(1, int(ult)):
print(pag)
url = 'https://minciencias.gov.co/convocatorias/todas?page=' + str(pag)
horizontal = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""goog-stock-prediction.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1AKrijE9xS03KZo8MMsMxbTD9WkwisjYl
#Stock Prediction Using LSTM
<img src = 'https://www.usnews.com/dims4/USNEWS/85cf3cc/2147483647/thumbnail/640x420/quality/85/?url=http%3A%2F%2Fmedia.beam.usnews.com%2Fd8%2F17%2F754293f84bc69ddfbffaa5793cae%2F190411-stockmarket-stock.jpg' width="400">
In this project, a simple LSTM neural network will predict future change in the stock prices of Alphabet (GOOG). Long Short-Term Memory (LSTM) networks are a type of recurrent neural network capable of learning order dependence in sequence prediction problems. The stock dataset was obtained from [Kaggle](https://www.kaggle.com/borismarjanovic/price-volume-data-for-all-us-stocks-etfs).
"""
from google.colab import drive # for Google Colab
drive.mount('/content/drive', force_remount=True)
import numpy as np
import random
import pandas as pd
import matplotlib.pyplot as plt
import math, time
from sklearn import preprocessing
from sklearn.preprocessing import MinMaxScaler
import itertools
import datetime
from operator import itemgetter
from math import sqrt
import torch
import torch.nn as nn
import os
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
device
"""## Load Data"""
dir = "/content/drive/MyDrive/Colab Notebooks/Data"
def stocks_data(symbols, dates):
df = pd.DataFrame(index=dates)
for symbol in symbols:
df_temp = pd.read_csv(dir + "/Stocks/{}.us.txt".format(symbol), index_col='Date',
parse_dates=True, usecols=['Date', 'Close'], na_values=['nan'])
df_temp = df_temp.rename(columns={'Close': symbol})
df = df.join(df_temp)
return df
dates = pd.date_range('2015-01-02', '2016-12-31', freq='B')
symbols = ['goog', 'aapl', 'amzn', 'msft', 'tsla']
df = stocks_data(symbols, dates)
df.fillna(method='pad')
df.interpolate().plot()
plt.show()
df.head()
dates = pd.date_range('2014-03-27', '2017-11-10', freq='B')
df_date = | pd.DataFrame(index=dates) | pandas.DataFrame |
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.externals import joblib
import warnings
warnings.filterwarnings("ignore")
# Choose GBDT Regression model as baseline
# my_model = GradientBoostingRegressor()
# Training Step
def my_train_func(station):
train_data = pd.read_csv('train-dataset/point_date_' + station + '.csv')
train_data_Y = train_data['actualPowerGeneration']
# Drop some non-relative factors
drop_columns = ['longitude', 'latitude', 'RadiationHorizontalPlane', 'Temperature', 'actualPowerGeneration',
'Humidity', 'atmosphericPressure', 'windDirection', 'scatteredRadiation']
train_data_X = train_data.drop(axis=1, columns=drop_columns)
train_data_X['month'] = pd.to_datetime(train_data_X.Time).dt.month
train_data_X['day'] = pd.to_datetime(train_data_X.Time).dt.day
train_data_X['hour'] = pd.to_datetime(train_data_X.Time).dt.hour
train_data_X = train_data_X.drop(axis=1, columns=['Time'])
# Validation
X_train, X_test, Y_train, Y_test = train_test_split(train_data_X, train_data_Y, test_size=0.2, random_state=40)
myGBR = GradientBoostingRegressor(n_estimators=500,max_depth=7)
myGBR.fit(X_train, Y_train)
Y_pred = myGBR.predict(X_test)
# Output model to global variation
# my_model = myGBR
_ = joblib.dump(myGBR, 'model/' + station + '_model.pkl', compress=9)
print('Training completed. MSE on validation set is {}'.format(mean_squared_error(Y_test, Y_pred)))
print('Factors below are used: \n{}'.format(list(X_train.columns)))
def my_spredict_func(station, input_file, output_file):
# Clean test data
columns = 'Time,longitude,latitude,directRadiation,scatterdRadiation,windSpeed,airTransparency,airDensity'
columns = list(columns.split(','))
test_data = pd.read_csv('test-dataset/' + input_file, names=columns)
drop_columns = ['longitude', 'latitude', 'airTransparency', 'airDensity']
test_data = test_data.drop(axis=1, columns=drop_columns)
test_data['month'] = pd.to_datetime(test_data.Time).dt.month
test_data['day'] = pd.to_datetime(test_data.Time).dt.day
test_data['hour'] = pd.to_datetime(test_data.Time).dt.hour
test_data['min'] = pd.to_datetime(test_data.Time).dt.minute
# Find the time point we need to start with
test_data = test_data.sort_values(by='Time')
# Find the latest time point
time_point = test_data[test_data['hour'] == 0][test_data['min'] == 0].index.tolist()[0]
start_point = test_data.loc[time_point]['Time']
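# 96 fifteen-minute steps = one full day of forecast timestamps starting at midnight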
observation_period = pd.date_range(start=start_point, periods=96, freq='15T').strftime("%Y-%m-%d %H:%M:%S").tolist()
test_data = test_data.drop(axis=1, columns=['Time', 'min'])
# Simply fill the NaN values, need more discussion
test_data = test_data.fillna(method='ffill')
test_data = test_data.fillna(0)
test_data = test_data.iloc[time_point:time_point + 96]
try:
my_model = joblib.load('model/' + station + '_model.pkl')
print('Find pretrained model!\n')
except:
print('Need train first!')
exit(0)
result = my_model.predict(test_data)
# result = [at_least(x) for x in result]
two_columns = ['Time', 'Short Predict']
result = | pd.DataFrame(data={two_columns[0]: observation_period, two_columns[1]: result}) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 23 11:32:40 2021
@author: bianca
"""
# +++ IMPLIED GROWTH RATES +++ #
import pandas as pd
import os
import numpy as np
from datetime import datetime
## select Merge File US Equity and WRDS
df_assig3 = pd.read_csv("./data/external/assignment_3_sp500_constituents_with_daily_mdata.csv")
df_assig3['date']
df_assig3.head(29)
df_assig3.iloc[:]
# S&P 500 Index
SP500_avg = df_assig3.groupby(['date']).agg({
'prc': 'mean'
})
SP500_avg
SP500_index = pd.DataFrame(SP500_avg, columns= ['prc'])
SP500_index
SP500_index.to_csv("./data/external/assignment_3_SP500_index.csv")
# summarize data
df2 = df_assig3.groupby(['date', 'comnam', 'permno', 'hsiccd', 'ticker', 'gvkey']).agg({
'prc': 'mean'
})
df2
df2.to_csv("./data/external/assignment_3_sp500_summary.csv")
df_CAR_UE = pd.read_csv("./data/external/assign3_summary CAR UE.csv")
df_CAR_UE['hsiccd']
type(['month'])
df_CAR_UE['month'].astype(int)
## create 'month' and 'year' column
| pd.Timestamp(df_CAR_UE['date']) | pandas.Timestamp |
import cv2
from PIL import Image
import numpy as np
import pandas as pd
import torch
from torch import nn
import torchvision
from torchsat.transforms import transforms_seg
import matplotlib.pyplot as plt
from torchvision.transforms import transforms
from torch.utils.data import Dataset
import torch.nn.functional as F
from tqdm.autonotebook import tqdm
import time
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_curve, auc
# this function came from https://www.kaggle.com/amalrda/imagesegmentation
def decode_pixels(pix, rows=2100, cols=1400,label=255):
"""Encode the pixel location and length pairs into a mask.
This function is derived from https://www.kaggle.com/amalrda/imagesegmentation (it's basically copied and pasted).
Take in a row from the 'EncodedPixels' column of the train.csv dataframe and converts it to a mask.
Parameters
----------
pix: list - This is split into pairs of starting pixels and lengths. These correspond to
an pixels in a 1D array which is later reshaped into the appropriate width and height.
rows: number of rows the encoded image should be reshaped to. Defaults to 2100.
cols: number of columnss the encoded image should be reshaped to. Defaults to 1400.
label: what value the masked pixels should be assigned. Defaults to 255
Returns
-------
np.array - an array that contains the mask of the encoded pixels passed in.
"""
# if there is information in the pixel list, then parse it and make the (pixel, length) pairs
if isinstance(pix,str):
# coverting the string into a list of numbers
rle_numbers = [int(num_string) for num_string in pix.split(' ')]
# Coverting them into starting index and length pairs
rle_pairs = np.array(rle_numbers).reshape(-1,2)
# otherwise, then make the array of (pixel, length) pairs empty
else:
rle_pairs = np.array([])
# Creating a blank image in form of a single row array
img = np.zeros(rows*cols, dtype=np.uint8)
# Setting the segmented pixels in the img
for ind, length in rle_pairs:
ind -= 1
img[ind:ind+length] = label
img = img.reshape(rows,cols)#,1)
img = img.T
return img
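# Illustrative example (not part of the original file): decode_pixels("1 3 7 2", rows=4, cols=3)
# switches on a run of 3 pixels starting at 1-based position 1 and a run of 2 starting at
# position 7 in the flattened image, then returns the transposed 3x4 mask.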
class CloudData(Dataset):
"""This class is used for the cloud data"""
def __init__(self, data_directory = None, mask_df = None, data_type = 'train', transform=None, output_width=256, output_height=256, normalize_func = None, preprocessing=None):
"""Instantiate the CloudData object.
Arguments
---------
data_directory (str) - path of where the images are saved. Defaults to None.
mask_df (pd.DataFrame) - the dataframe of the training data image names and labels, and encoded pixels. Defaults to None.
data_type (str) - whether the data is of type train or test. If it is test, there is no mask information and only the images are gathered. Defaults to "train".
transform (torch.transforms) - pytorch transform(s) to have applied to the images. Defaults to None, in which case the images are only resized (and optionally normalized).
output_width (int) - the width to have the image tensor and masks outputted at.
output_height (int) - the height to have the image tensor and masks outputted at.
"""
super(CloudData,self).__init__()
# the Image_Label field in the mask_df is a string with format {image_name}_{image_label} and need to be broken into two pieces
mask_df['Label'] = [parts[1] for parts in mask_df['Image_Label'].str.split('_')]
mask_df['Image'] = [parts[0] for parts in mask_df['Image_Label'].str.split('_')]
# we are only interested in having one item per unique image and the output for each image will be one mask per class
self.unique_images = mask_df['Image'].unique()
self.mask_df = mask_df
# we need to know the list of unique classes
self.classes = list(mask_df['Label'].unique())
self.data_type = data_type
self.data_directory = data_directory
self.transform = transform
self.normalize_func = normalize_func
self.preprocessing = preprocessing
self.output_width = output_width
self.output_height = output_height
def __getitem__(self, idx):
# get the image name
image_name = self.unique_images[idx]
idx_images = self.mask_df[self.mask_df['Image']==image_name]
# decode the "EncodedPixels" into the mask for each of the classes
masks = []
for c in self.classes:
mask_subset = idx_images[idx_images['Label']==c]
if mask_subset.shape[0] > 0:
masks.append(torch.tensor(decode_pixels(mask_subset.iloc[0]['EncodedPixels'], label=1)))
else:
masks.append(torch.tensor(decode_pixels(np.nan)))
# get the actual image
image = Image.open(self.data_directory+'/train_images/'+image_name)
image_tensor = torchvision.transforms.ToTensor()(image)
del image # save memory
resized_image = (F.interpolate(image_tensor.unsqueeze(0), (self.output_width, self.output_height))).squeeze(0).float()
resized_mask = (F.interpolate(torch.stack(masks).unsqueeze(0), (self.output_width, self.output_height))).squeeze(0).float()
if self.preprocessing:
preprocessed = self.preprocessing(image=resized_image, mask=resized_mask)
resized_image = preprocessed['image']
resized_mask = preprocessed['mask']
if self.transform is None:
if self.normalize_func is None:
return resized_image, resized_mask
else:
return_img, return_mask = self.normalize_func(resized_image, resized_mask)
return return_img, return_mask.float()
else:
if self.normalize_func is None:
return_img, return_mask = self.perform_transform(resized_image, resized_mask,self.transform)
return return_img, return_mask.float()
else:
return_img, return_mask = self.perform_transform(resized_image,resized_mask,self.transform)
return_img, return_mask = self.normalize_func(return_img, return_mask)
return return_img, return_mask.float()
def __len__(self):
return len(self.unique_images)
def perform_transform(self,img, mask, transform_list):
img = (img.permute(1,2,0).numpy()*255).astype(np.uint8)
mask = (mask.permute(1,2,0).numpy()*255).astype(np.uint8)
transformed_img, transformed_mask = transforms_seg.Compose(
transform_list
)(img, mask)
return torch.tensor(transformed_img/255,dtype=torch.float32).permute(2,0,1), torch.tensor(transformed_mask/255,dtype=torch.float32).permute(2,0,1)
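# A usage sketch for CloudData, assuming the Kaggle "Understanding Clouds" layout
# (a train.csv with an 'Image_Label' column plus a train_images/ folder); the
# DATA_DIR path and the batch size are assumptions, not values taken from this project.
DATA_DIR = './data'
train_df = pd.read_csv(DATA_DIR + '/train.csv')
train_dataset = CloudData(data_directory=DATA_DIR, mask_df=train_df)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=8, shuffle=True)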
def show_image_and_masks(images_and_masks, class_labels = None, inches_per_image = 3):
total_sets = len(images_and_masks)
images_per_set = 1 + images_and_masks[0][1].shape[0]
fig, ax = plt.subplots(total_sets,images_per_set)
fig.set_size_inches((inches_per_image*images_per_set,inches_per_image*total_sets))
for i, set_of_images in enumerate(images_and_masks):
ax[i][0].imshow(set_of_images[0].permute(1,2,0))
ax[i][0].axis('off')
for j in range(1,images_per_set):
ax[i][j].imshow(set_of_images[1][j-1,:,:])
ax[i][j].axis('off')
# now go through each class and add it as a title in the first row of masks
ax[0][0].set_title('Raw Image')
for j in range(1,images_per_set):
ax[0][j].set_title(class_labels[j-1])
fig.tight_layout()
return fig, ax
def show_predicted_masks(pred_masks,true_masks, original_images, inches_per_img=3, classes = None, cmap='coolwarm'):
sigmoid_layer = nn.Sigmoid()
# first, check to make sure there are the same number of pred and true masks
if len(pred_masks) != len(true_masks):
raise ValueError(f"There must be the same number of pred masks as true masks. There are {len(pred_masks)} pred masks and {len(true_masks)} true masks.")
rows = len(pred_masks)*2
columns = pred_masks[0].shape[0]
fig, ax = plt.subplots(rows,columns+1)
fig.set_size_inches((columns*inches_per_img,rows*inches_per_img))
for r in range(int(rows/2)):
for c in range(columns):
ax[r*2][c+1].imshow(true_masks[r][c,:,:])
im = ax[r*2+1][c+1].imshow(sigmoid_layer(pred_masks[r][c,:,:]),cmap=cmap)
if not cmap is None:
fig.colorbar(im,ax=ax[r*2+1][c+1])
if c == 0:
ax[r*2][c].set_ylabel('True Masks')
ax[r*2+1][c].set_ylabel('Pred Masks')
ax[r*2][0].imshow(original_images[r])
ax[r*2+1][0].imshow(original_images[r])
if not classes is None:
for c in range(columns):
ax[0][c+1].set_title(classes[c])
fig.tight_layout()
return fig, ax
"""This function is copied from https://github.com/EdwardRaff/Inside-Deep-Learning/blob/main/idlmam.py"""
def moveTo(obj, device):
"""
obj: the python object to move to a device, or to move its contents to a device
device: the compute device to move objects to
"""
if hasattr(obj, "to"):
return obj.to(device)
elif isinstance(obj, list):
return [moveTo(x, device) for x in obj]
elif isinstance(obj, tuple):
return tuple(moveTo(list(obj), device))
elif isinstance(obj, set):
return set(moveTo(list(obj), device))
elif isinstance(obj, dict):
to_ret = dict()
for key, value in obj.items():
to_ret[moveTo(key, device)] = moveTo(value, device)
return to_ret
else:
return obj
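# Small example of moveTo on a nested container (CPU-only, so it runs anywhere):
_moved = moveTo({"x": torch.zeros(2), "y": [torch.ones(1)]}, "cpu")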
"""This function below is copied from https://github.com/EdwardRaff/Inside-Deep-Learning/blob/main/idlmam.py, but revised
so that the score_funcs are calculated after each batch rather than at the end. Now, at the end, the scores from each
batch are averaged. The reason for this change is for memory. Previously, the y_pred and y_true were being appended to a list
after each batch which required much more space than I have available."""
def run_epoch(model, optimizer, data_loader, loss_func, device, results, score_funcs, prefix="", desc=None):
"""
model -- the PyTorch model / "Module" to run for one epoch
optimizer -- the object that will update the weights of the network
data_loader -- DataLoader object that returns tuples of (input, label) pairs.
loss_func -- the loss function that takes in two arguments, the model outputs and the labels, and returns a score
device -- the compute location to perform training
score_funcs -- a dictionary of scoring functions to use to evaluate the performance of the model
prefix -- a string to pre-fix to any scores placed into the _results_ dictionary.
desc -- a description to use for the progress bar.
"""
running_loss = []
# make a dictionary of empty lists for each score_func
score_func_batch_results = {}
for name, score_func in score_funcs.items():
score_func_batch_results[name] = []
start = time.time()
for inputs, labels in tqdm(data_loader, desc=desc, leave=False):
#Move the batch to the device we are using.
inputs = moveTo(inputs, device)
labels = moveTo(labels, device)
y_hat = model(inputs) #this just computed f_Θ(x(i))
# Compute loss.
loss = loss_func(y_hat, labels)
if model.training:
loss.backward()
optimizer.step()
optimizer.zero_grad()
#Now we are just grabbing some information we would like to have
running_loss.append(loss.detach().item())
if len(score_funcs) > 0 and isinstance(labels, torch.Tensor):
#moving labels & predictions back to CPU for computing / storing predictions
labels = labels.detach().cpu().numpy()
y_hat = y_hat.detach().cpu().numpy()
#add to predictions so far
y_true = labels
y_pred = y_hat
# quick check for classification vs. regression,
if len(y_pred.shape) == 2 and y_pred.shape[1] > 1: #We have a classification problem, convert to labels
y_pred = np.argmax(y_pred, axis=1)
#Else, we assume we are working on a regression problem
# now going through each of the score functions and appending the score from this batch to it's list of values
for name, score_func in score_funcs.items():
try:
value = score_func(y_true, y_pred)
score_func_batch_results[name].append(value)
except:
score_func_batch_results[name].append(float("NaN"))
#end training epoch
end = time.time()
# now that the epoch is over, average the batch scores for each score function and add them to "results" df
for name, score_func in score_funcs.items():
try:
results[prefix + " " + name].append( np.mean(score_func_batch_results[name]) )
except:
results[prefix + " " + name].append(float("NaN"))
results[prefix + " loss"].append( np.mean(running_loss) )
return end-start #time spent on epoch
"""This is direct copy from https://github.com/EdwardRaff/Inside-Deep-Learning/blob/main/idlmam.py so that it references the new version of 'run_epoch'
"""
def train_network(model, loss_func, train_loader, val_loader=None, test_loader=None,score_funcs=None,
epochs=50, device="cpu", checkpoint_file=None,
lr_schedule=None, optimizer=None, disable_tqdm=False
):
"""Train simple neural networks
Keyword arguments:
model -- the PyTorch model / "Module" to train
loss_func -- the loss function that takes in two arguments per batch, the model outputs and the labels, and returns a score
train_loader -- PyTorch DataLoader object that returns tuples of (input, label) pairs.
val_loader -- Optional PyTorch DataLoader to evaluate on after every epoch
test_loader -- Optional PyTorch DataLoader to evaluate on after every epoch
score_funcs -- A dictionary of scoring functions to use to evaluate the performance of the model
epochs -- the number of training epochs to perform
device -- the compute location to perform training
lr_schedule -- the learning rate schedule used to alter \eta as the model trains. If this is not None then the user must also provide the optimizer to use.
optimizer -- the method used to alter the gradients for learning.
"""
if score_funcs is None:
score_funcs = {}#Empty set
to_track = ["epoch", "total time", "train loss"]
if val_loader is not None:
to_track.append("val loss")
if test_loader is not None:
to_track.append("test loss")
for eval_score in score_funcs:
to_track.append("train " + eval_score )
if val_loader is not None:
to_track.append("val " + eval_score )
if test_loader is not None:
to_track.append("test "+ eval_score )
total_train_time = 0 #How long have we spent in the training loop?
results = {}
#Initialize every item with an empty list
for item in to_track:
results[item] = []
if optimizer is None:
#The AdamW optimizer is a good default optimizer
optimizer = torch.optim.AdamW(model.parameters())
del_opt = True
else:
del_opt = False
#Place the model on the correct compute resource (CPU or GPU)
model.to(device)
for epoch in tqdm(range(epochs), desc="Epoch", disable=disable_tqdm):
model = model.train()#Put our model in training mode
total_train_time += run_epoch(model, optimizer, train_loader, loss_func, device, results, score_funcs, prefix="train", desc="Training")
results["epoch"].append( epoch )
results["total time"].append( total_train_time )
if val_loader is not None:
model = model.eval() #Set the model to "evaluation" mode, b/c we don't want to make any updates!
with torch.no_grad():
run_epoch(model, optimizer, val_loader, loss_func, device, results, score_funcs, prefix="val", desc="Validating")
#In PyTorch, the convention is to update the learning rate after every epoch
if lr_schedule is not None:
if isinstance(lr_schedule, torch.optim.lr_scheduler.ReduceLROnPlateau):
lr_schedule.step(results["val loss"][-1])
else:
lr_schedule.step()
if test_loader is not None:
model = model.eval() #Set the model to "evaluation" mode, b/c we don't want to make any updates!
with torch.no_grad():
run_epoch(model, optimizer, test_loader, loss_func, device, results, score_funcs, prefix="test", desc="Testing")
if checkpoint_file is not None:
torch.save({
'epoch': epoch,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'results' : results
}, checkpoint_file)
if del_opt:
del optimizer
return | pd.DataFrame.from_dict(results) | pandas.DataFrame.from_dict |
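# A hedged usage sketch of train_network with the train_loader built above; the tiny
# convolutional stand-in "model", the loss function, and the epoch count are
# assumptions for illustration, not this project's actual segmentation setup.
example_model = nn.Sequential(nn.Conv2d(3, 16, 3, padding=1), nn.ReLU(), nn.Conv2d(16, 4, 3, padding=1))
example_device = 'cuda' if torch.cuda.is_available() else 'cpu'
example_results = train_network(
    example_model,
    loss_func=nn.BCEWithLogitsLoss(),
    train_loader=train_loader,
    val_loader=None,
    score_funcs={},
    epochs=1,
    device=example_device,
    checkpoint_file='example_checkpoint.pt',
)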
from datetime import datetime, timedelta
import inspect
import numpy as np
import pytest
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_interval_dtype,
is_object_dtype,
)
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
RangeIndex,
Series,
Timestamp,
cut,
date_range,
to_datetime,
)
import pandas.util.testing as tm
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestDataFrameAlterAxes:
def test_set_index_directly(self, float_string_frame):
df = float_string_frame
idx = Index(np.arange(len(df))[::-1])
df.index = idx
tm.assert_index_equal(df.index, idx)
with pytest.raises(ValueError, match="Length mismatch"):
df.index = idx[::2]
def test_set_index(self, float_string_frame):
df = float_string_frame
idx = Index(np.arange(len(df))[::-1])
df = df.set_index(idx)
tm.assert_index_equal(df.index, idx)
with pytest.raises(ValueError, match="Length mismatch"):
df.set_index(idx[::2])
def test_set_index_cast(self):
# issue casting an index then set_index
df = DataFrame(
{"A": [1.1, 2.2, 3.3], "B": [5.0, 6.1, 7.2]}, index=[2010, 2011, 2012]
)
df2 = df.set_index(df.index.astype(np.int32))
tm.assert_frame_equal(df, df2)
# A has duplicate values, C does not
@pytest.mark.parametrize("keys", ["A", "C", ["A", "B"], ("tuple", "as", "label")])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_drop_inplace(self, frame_of_index_cols, drop, inplace, keys):
df = frame_of_index_cols
if isinstance(keys, list):
idx = MultiIndex.from_arrays([df[x] for x in keys], names=keys)
else:
idx = Index(df[keys], name=keys)
expected = df.drop(keys, axis=1) if drop else df
expected.index = idx
if inplace:
result = df.copy()
result.set_index(keys, drop=drop, inplace=True)
else:
result = df.set_index(keys, drop=drop)
tm.assert_frame_equal(result, expected)
# A has duplicate values, C does not
@pytest.mark.parametrize("keys", ["A", "C", ["A", "B"], ("tuple", "as", "label")])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_append(self, frame_of_index_cols, drop, keys):
df = frame_of_index_cols
keys = keys if isinstance(keys, list) else [keys]
idx = MultiIndex.from_arrays(
[df.index] + [df[x] for x in keys], names=[None] + keys
)
expected = df.drop(keys, axis=1) if drop else df.copy()
expected.index = idx
result = df.set_index(keys, drop=drop, append=True)
tm.assert_frame_equal(result, expected)
# A has duplicate values, C does not
@pytest.mark.parametrize("keys", ["A", "C", ["A", "B"], ("tuple", "as", "label")])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_append_to_multiindex(self, frame_of_index_cols, drop, keys):
# append to existing multiindex
df = frame_of_index_cols.set_index(["D"], drop=drop, append=True)
keys = keys if isinstance(keys, list) else [keys]
expected = frame_of_index_cols.set_index(["D"] + keys, drop=drop, append=True)
result = df.set_index(keys, drop=drop, append=True)
tm.assert_frame_equal(result, expected)
def test_set_index_after_mutation(self):
# GH1590
df = DataFrame({"val": [0, 1, 2], "key": ["<KEY>"]})
expected = DataFrame({"val": [1, 2]}, Index(["b", "c"], name="key"))
df2 = df.loc[df.index.map(lambda indx: indx >= 1)]
result = df2.set_index("key")
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# Add list-of-list constructor because list is ambiguous -> lambda
# also test index name if append=True (name is duplicate here for B)
@pytest.mark.parametrize(
"box",
[
Series,
Index,
np.array,
list,
lambda x: [list(x)],
lambda x: MultiIndex.from_arrays([x]),
],
)
@pytest.mark.parametrize(
"append, index_name", [(True, None), (True, "B"), (True, "test"), (False, None)]
)
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_single_array(
self, frame_of_index_cols, drop, append, index_name, box
):
df = frame_of_index_cols
df.index.name = index_name
key = box(df["B"])
if box == list:
# list of strings gets interpreted as list of keys
msg = "['one', 'two', 'three', 'one', 'two']"
with pytest.raises(KeyError, match=msg):
df.set_index(key, drop=drop, append=append)
else:
# np.array/list-of-list "forget" the name of B
name_mi = getattr(key, "names", None)
name = [getattr(key, "name", None)] if name_mi is None else name_mi
result = df.set_index(key, drop=drop, append=append)
# only valid column keys are dropped
# since B is always passed as array above, nothing is dropped
expected = df.set_index(["B"], drop=False, append=append)
expected.index.names = [index_name] + name if append else name
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# also test index name if append=True (name is duplicate here for A & B)
@pytest.mark.parametrize(
"box", [Series, Index, np.array, list, lambda x: MultiIndex.from_arrays([x])]
)
@pytest.mark.parametrize(
"append, index_name",
[(True, None), (True, "A"), (True, "B"), (True, "test"), (False, None)],
)
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_arrays(
self, frame_of_index_cols, drop, append, index_name, box
):
df = frame_of_index_cols
df.index.name = index_name
keys = ["A", box(df["B"])]
# np.array/list "forget" the name of B
names = ["A", None if box in [np.array, list, tuple, iter] else "B"]
result = df.set_index(keys, drop=drop, append=append)
# only valid column keys are dropped
# since B is always passed as array above, only A is dropped, if at all
expected = df.set_index(["A", "B"], drop=False, append=append)
expected = expected.drop("A", axis=1) if drop else expected
expected.index.names = [index_name] + names if append else names
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# We also emulate a "constructor" for the label -> lambda
# also test index name if append=True (name is duplicate here for A)
@pytest.mark.parametrize(
"box2",
[
Series,
Index,
np.array,
list,
iter,
lambda x: MultiIndex.from_arrays([x]),
lambda x: x.name,
],
)
@pytest.mark.parametrize(
"box1",
[
Series,
Index,
np.array,
list,
iter,
lambda x: MultiIndex.from_arrays([x]),
lambda x: x.name,
],
)
@pytest.mark.parametrize(
"append, index_name", [(True, None), (True, "A"), (True, "test"), (False, None)]
)
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_arrays_duplicate(
self, frame_of_index_cols, drop, append, index_name, box1, box2
):
df = frame_of_index_cols
df.index.name = index_name
keys = [box1(df["A"]), box2(df["A"])]
result = df.set_index(keys, drop=drop, append=append)
# if either box is iter, it has been consumed; re-read
keys = [box1(df["A"]), box2(df["A"])]
# need to adapt first drop for case that both keys are 'A' --
# cannot drop the same column twice;
# use "is" because == would give ambiguous Boolean error for containers
first_drop = (
False if (keys[0] is "A" and keys[1] is "A") else drop # noqa: F632
)
# to test against already-tested behaviour, we add sequentially,
# hence second append always True; must wrap keys in list, otherwise
# box = list would be interpreted as keys
expected = df.set_index([keys[0]], drop=first_drop, append=append)
expected = expected.set_index([keys[1]], drop=drop, append=True)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_multiindex(self, frame_of_index_cols, drop, append):
df = frame_of_index_cols
keys = MultiIndex.from_arrays([df["A"], df["B"]], names=["A", "B"])
result = df.set_index(keys, drop=drop, append=append)
# setting with a MultiIndex will never drop columns
expected = df.set_index(["A", "B"], drop=False, append=append)
tm.assert_frame_equal(result, expected)
def test_set_index_verify_integrity(self, frame_of_index_cols):
df = frame_of_index_cols
with pytest.raises(ValueError, match="Index has duplicate keys"):
df.set_index("A", verify_integrity=True)
# with MultiIndex
with pytest.raises(ValueError, match="Index has duplicate keys"):
df.set_index([df["A"], df["A"]], verify_integrity=True)
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_raise_keys(self, frame_of_index_cols, drop, append):
df = frame_of_index_cols
with pytest.raises(KeyError, match="['foo', 'bar', 'baz']"):
# column names are A-E, as well as one tuple
df.set_index(["foo", "bar", "baz"], drop=drop, append=append)
# non-existent key in list with arrays
with pytest.raises(KeyError, match="X"):
df.set_index([df["A"], df["B"], "X"], drop=drop, append=append)
msg = "[('foo', 'foo', 'foo', 'bar', 'bar')]"
# tuples always raise KeyError
with pytest.raises(KeyError, match=msg):
df.set_index(tuple(df["A"]), drop=drop, append=append)
# also within a list
with pytest.raises(KeyError, match=msg):
df.set_index(["A", df["A"], tuple(df["A"])], drop=drop, append=append)
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("box", [set], ids=["set"])
def test_set_index_raise_on_type(self, frame_of_index_cols, box, drop, append):
df = frame_of_index_cols
msg = 'The parameter "keys" may be a column key, .*'
# forbidden type, e.g. set
with pytest.raises(TypeError, match=msg):
df.set_index(box(df["A"]), drop=drop, append=append)
# forbidden type in list, e.g. set
with pytest.raises(TypeError, match=msg):
df.set_index(["A", df["A"], box(df["A"])], drop=drop, append=append)
# MultiIndex constructor does not work directly on Series -> lambda
@pytest.mark.parametrize(
"box",
[Series, Index, np.array, iter, lambda x: MultiIndex.from_arrays([x])],
ids=["Series", "Index", "np.array", "iter", "MultiIndex"],
)
@pytest.mark.parametrize("length", [4, 6], ids=["too_short", "too_long"])
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_raise_on_len(
self, frame_of_index_cols, box, length, drop, append
):
# GH 24984
df = frame_of_index_cols # has length 5
values = np.random.randint(0, 10, (length,))
msg = "Length mismatch: Expected 5 rows, received array of length.*"
# wrong length directly
with pytest.raises(ValueError, match=msg):
df.set_index(box(values), drop=drop, append=append)
# wrong length in list
with pytest.raises(ValueError, match=msg):
df.set_index(["A", df.A, box(values)], drop=drop, append=append)
def test_set_index_custom_label_type(self):
# GH 24969
class Thing:
def __init__(self, name, color):
self.name = name
self.color = color
def __str__(self):
return "<Thing {self.name!r}>".format(self=self)
# necessary for pretty KeyError
__repr__ = __str__
thing1 = Thing("One", "red")
thing2 = Thing("Two", "blue")
df = DataFrame({thing1: [0, 1], thing2: [2, 3]})
expected = DataFrame({thing1: [0, 1]}, index=Index([2, 3], name=thing2))
# use custom label directly
result = df.set_index(thing2)
tm.assert_frame_equal(result, expected)
# custom label wrapped in list
result = df.set_index([thing2])
tm.assert_frame_equal(result, expected)
# missing key
thing3 = Thing("Three", "pink")
msg = "<Thing 'Three'>"
with pytest.raises(KeyError, match=msg):
# missing label directly
df.set_index(thing3)
with pytest.raises(KeyError, match=msg):
# missing label in list
df.set_index([thing3])
def test_set_index_custom_label_hashable_iterable(self):
# GH 24969
# actual example discussed in GH 24984 was e.g. for shapely.geometry
# objects (e.g. a collection of Points) that can be both hashable and
# iterable; using frozenset as a stand-in for testing here
class Thing(frozenset):
# need to stabilize repr for KeyError (due to random order in sets)
def __repr__(self):
tmp = sorted(list(self))
# double curly brace prints one brace in format string
return "frozenset({{{}}})".format(", ".join(map(repr, tmp)))
thing1 = Thing(["One", "red"])
thing2 = Thing(["Two", "blue"])
df = DataFrame({thing1: [0, 1], thing2: [2, 3]})
expected = DataFrame({thing1: [0, 1]}, index=Index([2, 3], name=thing2))
# use custom label directly
result = df.set_index(thing2)
tm.assert_frame_equal(result, expected)
# custom label wrapped in list
result = df.set_index([thing2])
tm.assert_frame_equal(result, expected)
# missing key
thing3 = Thing(["Three", "pink"])
msg = r"frozenset\(\{'Three', 'pink'\}\)"
with pytest.raises(KeyError, match=msg):
# missing label directly
df.set_index(thing3)
with pytest.raises(KeyError, match=msg):
# missing label in list
df.set_index([thing3])
def test_set_index_custom_label_type_raises(self):
# GH 24969
# purposefully inherit from something unhashable
class Thing(set):
def __init__(self, name, color):
self.name = name
self.color = color
def __str__(self):
return "<Thing {self.name!r}>".format(self=self)
thing1 = Thing("One", "red")
thing2 = Thing("Two", "blue")
df = DataFrame([[0, 2], [1, 3]], columns=[thing1, thing2])
msg = 'The parameter "keys" may be a column key, .*'
with pytest.raises(TypeError, match=msg):
# use custom label directly
df.set_index(thing2)
with pytest.raises(TypeError, match=msg):
# custom label wrapped in list
df.set_index([thing2])
def test_construction_with_categorical_index(self):
ci = tm.makeCategoricalIndex(10)
ci.name = "B"
# with Categorical
df = DataFrame({"A": np.random.randn(10), "B": ci.values})
idf = df.set_index("B")
tm.assert_index_equal(idf.index, ci)
# from a CategoricalIndex
df = DataFrame({"A": np.random.randn(10), "B": ci})
idf = df.set_index("B")
tm.assert_index_equal(idf.index, ci)
# round-trip
idf = idf.reset_index().set_index("B")
tm.assert_index_equal(idf.index, ci)
def test_set_index_cast_datetimeindex(self):
df = DataFrame(
{
"A": [datetime(2000, 1, 1) + timedelta(i) for i in range(1000)],
"B": np.random.randn(1000),
}
)
idf = df.set_index("A")
assert isinstance(idf.index, DatetimeIndex)
def test_convert_dti_to_series(self):
# don't cast a DatetimeIndex WITH a tz, leave as object
# GH 6032
idx = DatetimeIndex(
to_datetime(["2013-1-1 13:00", "2013-1-2 14:00"]), name="B"
).tz_localize("US/Pacific")
df = DataFrame(np.random.randn(2, 1), columns=["A"])
expected = Series(
np.array(
[
Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),
Timestamp("2013-01-02 14:00:00-0800", tz="US/Pacific"),
],
dtype="object",
),
name="B",
)
# convert index to series
result = Series(idx)
tm.assert_series_equal(result, expected)
# assign to frame
df["B"] = idx
result = df["B"]
tm.assert_series_equal(result, expected)
# convert to series while keeping the timezone
result = idx.to_series(keep_tz=True, index=[0, 1])
tm.assert_series_equal(result, expected)
# convert to utc
with tm.assert_produces_warning(FutureWarning):
df["B"] = idx.to_series(keep_tz=False, index=[0, 1])
result = df["B"]
comp = Series(DatetimeIndex(expected.values).tz_localize(None), name="B")
tm.assert_series_equal(result, comp)
with tm.assert_produces_warning(FutureWarning) as m:
result = idx.to_series(index=[0, 1])
tm.assert_series_equal(result, expected.dt.tz_convert(None))
msg = (
"The default of the 'keep_tz' keyword in "
"DatetimeIndex.to_series will change to True in a future "
"release."
)
assert msg in str(m[0].message)
with tm.assert_produces_warning(FutureWarning):
result = idx.to_series(keep_tz=False, index=[0, 1])
tm.assert_series_equal(result, expected.dt.tz_convert(None))
# list of datetimes with a tz
df["B"] = idx.to_pydatetime()
result = df["B"]
tm.assert_series_equal(result, expected)
# GH 6785
# set the index manually
import pytz
df = DataFrame([{"ts": datetime(2014, 4, 1, tzinfo=pytz.utc), "foo": 1}])
expected = df.set_index("ts")
df.index = df["ts"]
df.pop("ts")
tm.assert_frame_equal(df, expected)
def test_reset_index_tz(self, tz_aware_fixture):
# GH 3950
# reset_index with single level
tz = tz_aware_fixture
idx = date_range("1/1/2011", periods=5, freq="D", tz=tz, name="idx")
df = DataFrame({"a": range(5), "b": ["A", "B", "C", "D", "E"]}, index=idx)
expected = DataFrame(
{
"idx": [
datetime(2011, 1, 1),
datetime(2011, 1, 2),
datetime(2011, 1, 3),
datetime(2011, 1, 4),
datetime(2011, 1, 5),
],
"a": range(5),
"b": ["A", "B", "C", "D", "E"],
},
columns=["idx", "a", "b"],
)
expected["idx"] = expected["idx"].apply(lambda d: Timestamp(d, tz=tz))
tm.assert_frame_equal(df.reset_index(), expected)
def test_set_index_timezone(self):
# GH 12358
# tz-aware Series should retain the tz
idx = to_datetime(["2014-01-01 10:10:10"], utc=True).tz_convert("Europe/Rome")
df = DataFrame({"A": idx})
assert df.set_index(idx).index[0].hour == 11
assert DatetimeIndex(Series(df.A))[0].hour == 11
assert df.set_index(df.A).index[0].hour == 11
def test_set_index_dst(self):
di = date_range("2006-10-29 00:00:00", periods=3, freq="H", tz="US/Pacific")
df = DataFrame(data={"a": [0, 1, 2], "b": [3, 4, 5]}, index=di).reset_index()
# single level
res = df.set_index("index")
exp = DataFrame(
data={"a": [0, 1, 2], "b": [3, 4, 5]}, index=Index(di, name="index")
)
tm.assert_frame_equal(res, exp)
# GH 12920
res = df.set_index(["index", "a"])
exp_index = MultiIndex.from_arrays([di, [0, 1, 2]], names=["index", "a"])
exp = DataFrame({"b": [3, 4, 5]}, index=exp_index)
tm.assert_frame_equal(res, exp)
def test_reset_index_with_intervals(self):
idx = IntervalIndex.from_breaks(np.arange(11), name="x")
original = DataFrame({"x": idx, "y": np.arange(10)})[["x", "y"]]
result = original.set_index("x")
expected = DataFrame({"y": np.arange(10)}, index=idx)
tm.assert_frame_equal(result, expected)
result2 = result.reset_index()
tm.assert_frame_equal(result2, original)
def test_set_index_multiindexcolumns(self):
columns = MultiIndex.from_tuples([("foo", 1), ("foo", 2), ("bar", 1)])
df = DataFrame(np.random.randn(3, 3), columns=columns)
result = df.set_index(df.columns[0])
expected = df.iloc[:, 1:]
expected.index = df.iloc[:, 0].values
expected.index.names = [df.columns[0]]
tm.assert_frame_equal(result, expected)
def test_set_index_empty_column(self):
# GH 1971
df = DataFrame(
[
{"a": 1, "p": 0},
{"a": 2, "m": 10},
{"a": 3, "m": 11, "p": 20},
{"a": 4, "m": 12, "p": 21},
],
columns=("a", "m", "p", "x"),
)
result = df.set_index(["a", "x"])
expected = df[["m", "p"]]
expected.index = MultiIndex.from_arrays([df["a"], df["x"]], names=["a", "x"])
tm.assert_frame_equal(result, expected)
def test_set_columns(self, float_string_frame):
cols = Index(np.arange(len(float_string_frame.columns)))
float_string_frame.columns = cols
with pytest.raises(ValueError, match="Length mismatch"):
float_string_frame.columns = cols[::2]
def test_dti_set_index_reindex(self):
# GH 6631
df = DataFrame(np.random.random(6))
idx1 = date_range("2011/01/01", periods=6, freq="M", tz="US/Eastern")
idx2 = date_range("2013", periods=6, freq="A", tz="Asia/Tokyo")
df = df.set_index(idx1)
tm.assert_index_equal(df.index, idx1)
df = df.reindex(idx2)
tm.assert_index_equal(df.index, idx2)
# GH 11314
# with tz
index = date_range(
datetime(2015, 10, 1), datetime(2015, 10, 1, 23), freq="H", tz="US/Eastern"
)
df = DataFrame(np.random.randn(24, 1), columns=["a"], index=index)
new_index = date_range(
datetime(2015, 10, 2), datetime(2015, 10, 2, 23), freq="H", tz="US/Eastern"
)
result = df.set_index(new_index)
assert result.index.freq == index.freq
# Renaming
def test_rename(self, float_frame):
mapping = {"A": "a", "B": "b", "C": "c", "D": "d"}
renamed = float_frame.rename(columns=mapping)
renamed2 = float_frame.rename(columns=str.lower)
tm.assert_frame_equal(renamed, renamed2)
tm.assert_frame_equal(
renamed2.rename(columns=str.upper), float_frame, check_names=False
)
# index
data = {"A": {"foo": 0, "bar": 1}}
# gets sorted alphabetical
df = DataFrame(data)
renamed = df.rename(index={"foo": "bar", "bar": "foo"})
tm.assert_index_equal(renamed.index, Index(["foo", "bar"]))
renamed = df.rename(index=str.upper)
tm.assert_index_equal(renamed.index, Index(["BAR", "FOO"]))
# have to pass something
with pytest.raises(TypeError, match="must pass an index to rename"):
float_frame.rename()
# partial columns
renamed = float_frame.rename(columns={"C": "foo", "D": "bar"})
tm.assert_index_equal(renamed.columns, Index(["A", "B", "foo", "bar"]))
# other axis
renamed = float_frame.T.rename(index={"C": "foo", "D": "bar"})
tm.assert_index_equal(renamed.index, Index(["A", "B", "foo", "bar"]))
# index with name
index = Index(["foo", "bar"], name="name")
renamer = DataFrame(data, index=index)
renamed = renamer.rename(index={"foo": "bar", "bar": "foo"})
tm.assert_index_equal(renamed.index, Index(["bar", "foo"], name="name"))
assert renamed.index.name == renamer.index.name
def test_rename_axis_inplace(self, float_frame):
# GH 15704
expected = float_frame.rename_axis("foo")
result = float_frame.copy()
no_return = result.rename_axis("foo", inplace=True)
assert no_return is None
tm.assert_frame_equal(result, expected)
expected = float_frame.rename_axis("bar", axis=1)
result = float_frame.copy()
no_return = result.rename_axis("bar", axis=1, inplace=True)
assert no_return is None
tm.assert_frame_equal(result, expected)
def test_rename_axis_raises(self):
# https://github.com/pandas-dev/pandas/issues/17833
df = DataFrame({"A": [1, 2], "B": [1, 2]})
with pytest.raises(ValueError, match="Use `.rename`"):
df.rename_axis(id, axis=0)
with pytest.raises(ValueError, match="Use `.rename`"):
df.rename_axis({0: 10, 1: 20}, axis=0)
with pytest.raises(ValueError, match="Use `.rename`"):
df.rename_axis(id, axis=1)
with pytest.raises(ValueError, match="Use `.rename`"):
df["A"].rename_axis(id)
def test_rename_axis_mapper(self):
# GH 19978
mi = MultiIndex.from_product([["a", "b", "c"], [1, 2]], names=["ll", "nn"])
df = DataFrame(
{"x": [i for i in range(len(mi))], "y": [i * 10 for i in range(len(mi))]},
index=mi,
)
# Test for rename of the Index object of columns
result = df.rename_axis("cols", axis=1)
tm.assert_index_equal(result.columns, Index(["x", "y"], name="cols"))
# Test for rename of the Index object of columns using dict
result = result.rename_axis(columns={"cols": "new"}, axis=1)
tm.assert_index_equal(result.columns, Index(["x", "y"], name="new"))
# Test for renaming index using dict
result = df.rename_axis(index={"ll": "foo"})
assert result.index.names == ["foo", "nn"]
# Test for renaming index using a function
result = df.rename_axis(index=str.upper, axis=0)
assert result.index.names == ["LL", "NN"]
# Test for renaming index providing complete list
result = df.rename_axis(index=["foo", "goo"])
assert result.index.names == ["foo", "goo"]
# Test for changing index and columns at same time
sdf = df.reset_index().set_index("nn").drop(columns=["ll", "y"])
result = sdf.rename_axis(index="foo", columns="meh")
assert result.index.name == "foo"
assert result.columns.name == "meh"
# Test different error cases
with pytest.raises(TypeError, match="Must pass"):
df.rename_axis(index="wrong")
with pytest.raises(ValueError, match="Length of names"):
df.rename_axis(index=["wrong"])
with pytest.raises(TypeError, match="bogus"):
df.rename_axis(bogus=None)
@pytest.mark.parametrize(
"kwargs, rename_index, rename_columns",
[
({"mapper": None, "axis": 0}, True, False),
({"mapper": None, "axis": 1}, False, True),
({"index": None}, True, False),
({"columns": None}, False, True),
({"index": None, "columns": None}, True, True),
({}, False, False),
],
)
def test_rename_axis_none(self, kwargs, rename_index, rename_columns):
# GH 25034
index = Index(list("abc"), name="foo")
columns = Index(["col1", "col2"], name="bar")
data = np.arange(6).reshape(3, 2)
df = DataFrame(data, index, columns)
result = df.rename_axis(**kwargs)
expected_index = index.rename(None) if rename_index else index
expected_columns = columns.rename(None) if rename_columns else columns
expected = DataFrame(data, expected_index, expected_columns)
tm.assert_frame_equal(result, expected)
def test_rename_multiindex(self):
tuples_index = [("foo1", "bar1"), ("foo2", "bar2")]
tuples_columns = [("fizz1", "buzz1"), ("fizz2", "buzz2")]
index = MultiIndex.from_tuples(tuples_index, names=["foo", "bar"])
columns = MultiIndex.from_tuples(tuples_columns, names=["fizz", "buzz"])
df = DataFrame([(0, 0), (1, 1)], index=index, columns=columns)
#
# without specifying level -> across all levels
renamed = df.rename(
index={"foo1": "foo3", "bar2": "bar3"},
columns={"fizz1": "fizz3", "buzz2": "buzz3"},
)
new_index = MultiIndex.from_tuples(
[("foo3", "bar1"), ("foo2", "bar3")], names=["foo", "bar"]
)
new_columns = MultiIndex.from_tuples(
[("fizz3", "buzz1"), ("fizz2", "buzz3")], names=["fizz", "buzz"]
)
tm.assert_index_equal(renamed.index, new_index)
tm.assert_index_equal(renamed.columns, new_columns)
assert renamed.index.names == df.index.names
assert renamed.columns.names == df.columns.names
#
# with specifying a level (GH13766)
# dict
new_columns = MultiIndex.from_tuples(
[("fizz3", "buzz1"), ("fizz2", "buzz2")], names=["fizz", "buzz"]
)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=0)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="fizz")
tm.assert_index_equal(renamed.columns, new_columns)
new_columns = MultiIndex.from_tuples(
[("fizz1", "buzz1"), ("fizz2", "buzz3")], names=["fizz", "buzz"]
)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=1)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="buzz")
tm.assert_index_equal(renamed.columns, new_columns)
# function
func = str.upper
new_columns = MultiIndex.from_tuples(
[("FIZZ1", "buzz1"), ("FIZZ2", "buzz2")], names=["fizz", "buzz"]
)
renamed = df.rename(columns=func, level=0)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns=func, level="fizz")
tm.assert_index_equal(renamed.columns, new_columns)
new_columns = MultiIndex.from_tuples(
[("fizz1", "BUZZ1"), ("fizz2", "BUZZ2")], names=["fizz", "buzz"]
)
renamed = df.rename(columns=func, level=1)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns=func, level="buzz")
tm.assert_index_equal(renamed.columns, new_columns)
# index
new_index = MultiIndex.from_tuples(
[("foo3", "bar1"), ("foo2", "bar2")], names=["foo", "bar"]
)
renamed = df.rename(index={"foo1": "foo3", "bar2": "bar3"}, level=0)
tm.assert_index_equal(renamed.index, new_index)
def test_rename_nocopy(self, float_frame):
renamed = float_frame.rename(columns={"C": "foo"}, copy=False)
renamed["foo"] = 1.0
assert (float_frame["C"] == 1.0).all()
def test_rename_inplace(self, float_frame):
float_frame.rename(columns={"C": "foo"})
assert "C" in float_frame
assert "foo" not in float_frame
c_id = id(float_frame["C"])
float_frame = float_frame.copy()
float_frame.rename(columns={"C": "foo"}, inplace=True)
assert "C" not in float_frame
assert "foo" in float_frame
assert id(float_frame["foo"]) != c_id
def test_rename_bug(self):
# GH 5344
# rename set ref_locs, and set_index was not resetting
df = DataFrame({0: ["foo", "bar"], 1: ["bah", "bas"], 2: [1, 2]})
df = df.rename(columns={0: "a"})
df = df.rename(columns={1: "b"})
df = df.set_index(["a", "b"])
df.columns = ["2001-01-01"]
expected = DataFrame(
[[1], [2]],
index=MultiIndex.from_tuples(
[("foo", "bah"), ("bar", "bas")], names=["a", "b"]
),
columns=["2001-01-01"],
)
tm.assert_frame_equal(df, expected)
def test_rename_bug2(self):
# GH 19497
# rename was changing Index to MultiIndex if Index contained tuples
df = DataFrame(data=np.arange(3), index=[(0, 0), (1, 1), (2, 2)], columns=["a"])
df = df.rename({(1, 1): (5, 4)}, axis="index")
expected = DataFrame(
data=np.arange(3), index=[(0, 0), (5, 4), (2, 2)], columns=["a"]
)
tm.assert_frame_equal(df, expected)
def test_rename_errors_raises(self):
df = DataFrame(columns=["A", "B", "C", "D"])
with pytest.raises(KeyError, match="'E'] not found in axis"):
df.rename(columns={"A": "a", "E": "e"}, errors="raise")
@pytest.mark.parametrize(
"mapper, errors, expected_columns",
[
({"A": "a", "E": "e"}, "ignore", ["a", "B", "C", "D"]),
({"A": "a"}, "raise", ["a", "B", "C", "D"]),
(str.lower, "raise", ["a", "b", "c", "d"]),
],
)
def test_rename_errors(self, mapper, errors, expected_columns):
# GH 13473
# rename now works with errors parameter
df = DataFrame(columns=["A", "B", "C", "D"])
result = df.rename(columns=mapper, errors=errors)
expected = DataFrame(columns=expected_columns)
tm.assert_frame_equal(result, expected)
def test_reorder_levels(self):
index = MultiIndex(
levels=[["bar"], ["one", "two", "three"], [0, 1]],
codes=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]],
names=["L0", "L1", "L2"],
)
df = DataFrame({"A": np.arange(6), "B": np.arange(6)}, index=index)
# no change, position
result = df.reorder_levels([0, 1, 2])
tm.assert_frame_equal(df, result)
# no change, labels
result = df.reorder_levels(["L0", "L1", "L2"])
tm.assert_frame_equal(df, result)
# rotate, position
result = df.reorder_levels([1, 2, 0])
e_idx = MultiIndex(
levels=[["one", "two", "three"], [0, 1], ["bar"]],
codes=[[0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1], [0, 0, 0, 0, 0, 0]],
names=["L1", "L2", "L0"],
)
expected = DataFrame({"A": np.arange(6), "B": np.arange(6)}, index=e_idx)
tm.assert_frame_equal(result, expected)
result = df.reorder_levels([0, 0, 0])
e_idx = MultiIndex(
levels=[["bar"], ["bar"], ["bar"]],
codes=[[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]],
names=["L0", "L0", "L0"],
)
expected = DataFrame({"A": np.arange(6), "B": np.arange(6)}, index=e_idx)
tm.assert_frame_equal(result, expected)
result = df.reorder_levels(["L0", "L0", "L0"])
tm.assert_frame_equal(result, expected)
def test_reset_index(self, float_frame):
stacked = float_frame.stack()[::2]
stacked = DataFrame({"foo": stacked, "bar": stacked})
names = ["first", "second"]
stacked.index.names = names
deleveled = stacked.reset_index()
for i, (lev, level_codes) in enumerate(
zip(stacked.index.levels, stacked.index.codes)
):
values = lev.take(level_codes)
name = names[i]
tm.assert_index_equal(values, Index(deleveled[name]))
stacked.index.names = [None, None]
deleveled2 = stacked.reset_index()
tm.assert_series_equal(
deleveled["first"], deleveled2["level_0"], check_names=False
)
tm.assert_series_equal(
deleveled["second"], deleveled2["level_1"], check_names=False
)
# default name assigned
rdf = float_frame.reset_index()
exp = Series(float_frame.index.values, name="index")
tm.assert_series_equal(rdf["index"], exp)
# default name assigned, corner case
df = float_frame.copy()
df["index"] = "foo"
rdf = df.reset_index()
exp = Series(float_frame.index.values, name="level_0")
tm.assert_series_equal(rdf["level_0"], exp)
# but this is ok
float_frame.index.name = "index"
deleveled = float_frame.reset_index()
tm.assert_series_equal(deleveled["index"], Series(float_frame.index))
tm.assert_index_equal(deleveled.index, Index(np.arange(len(deleveled))))
# preserve column names
float_frame.columns.name = "columns"
resetted = float_frame.reset_index()
assert resetted.columns.name == "columns"
# only remove certain columns
df = float_frame.reset_index().set_index(["index", "A", "B"])
rs = df.reset_index(["A", "B"])
# TODO should reset_index check_names ?
tm.assert_frame_equal(rs, float_frame, check_names=False)
rs = df.reset_index(["index", "A", "B"])
tm.assert_frame_equal(rs, float_frame.reset_index(), check_names=False)
rs = df.reset_index(["index", "A", "B"])
tm.assert_frame_equal(rs, float_frame.reset_index(), check_names=False)
rs = df.reset_index("A")
xp = float_frame.reset_index().set_index(["index", "B"])
tm.assert_frame_equal(rs, xp, check_names=False)
# test resetting in place
df = float_frame.copy()
resetted = float_frame.reset_index()
df.reset_index(inplace=True)
tm.assert_frame_equal(df, resetted, check_names=False)
df = float_frame.reset_index().set_index(["index", "A", "B"])
rs = df.reset_index("A", drop=True)
xp = float_frame.copy()
del xp["A"]
xp = xp.set_index(["B"], append=True)
tm.assert_frame_equal(rs, xp, check_names=False)
def test_reset_index_name(self):
df = DataFrame(
[[1, 2, 3, 4], [5, 6, 7, 8]],
columns=["A", "B", "C", "D"],
index=Index(range(2), name="x"),
)
assert df.reset_index().index.name is None
assert df.reset_index(drop=True).index.name is None
df.reset_index(inplace=True)
assert df.index.name is None
def test_reset_index_level(self):
df = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=["A", "B", "C", "D"])
for levels in ["A", "B"], [0, 1]:
# With MultiIndex
result = df.set_index(["A", "B"]).reset_index(level=levels[0])
tm.assert_frame_equal(result, df.set_index("B"))
result = df.set_index(["A", "B"]).reset_index(level=levels[:1])
tm.assert_frame_equal(result, df.set_index("B"))
result = df.set_index(["A", "B"]).reset_index(level=levels)
tm.assert_frame_equal(result, df)
result = df.set_index(["A", "B"]).reset_index(level=levels, drop=True)
tm.assert_frame_equal(result, df[["C", "D"]])
# With single-level Index (GH 16263)
result = df.set_index("A").reset_index(level=levels[0])
tm.assert_frame_equal(result, df)
result = df.set_index("A").reset_index(level=levels[:1])
tm.assert_frame_equal(result, df)
result = df.set_index(["A"]).reset_index(level=levels[0], drop=True)
tm.assert_frame_equal(result, df[["B", "C", "D"]])
# Missing levels - for both MultiIndex and single-level Index:
for idx_lev in ["A", "B"], ["A"]:
with pytest.raises(KeyError, match="Level E "):
df.set_index(idx_lev).reset_index(level=["A", "E"])
with pytest.raises(IndexError, match="Too many levels"):
df.set_index(idx_lev).reset_index(level=[0, 1, 2])
def test_reset_index_right_dtype(self):
time = np.arange(0.0, 10, np.sqrt(2) / 2)
s1 = Series(
(9.81 * time ** 2) / 2, index=Index(time, name="time"), name="speed"
)
df = | DataFrame(s1) | pandas.DataFrame |
import os
import numpy as np
import pandas as pd
import framework.constants as cs
from io import StringIO
from framework.representations.embedding import Embedding
from framework.util import scaleInRange
from framework.util import drop_duplicates
heads_vad = ['Word','Valence','Arousal','Dominance']
heads_be5 = ['Word','Joy','Anger','Sadness','Fear','Disgust']
#### ENGLISH
def load_anew10():
anew = pd.read_csv(cs.anew10, sep = '\t')
anew = anew[['Word','ValMn','AroMn','DomMn']]
anew.columns = ['Word', 'Valence', 'Arousal',
'Dominance']
anew.set_index('Word', inplace=True)
return anew
def load_anew99():
anew=pd.read_csv(cs.anew99, sep='\t')
anew.columns=heads_vad
anew.set_index('Word', inplace=True)
anew=drop_duplicates(anew)
return anew
def load_stevenson07():
stevenson07=pd.read_excel(cs.stevenson07)
stevenson07=stevenson07[['word','mean_hap','mean_ang','mean_sad',
'mean_fear','mean_dis']]
stevenson07.columns=['Word', 'Joy','Anger','Sadness','Fear','Disgust']
stevenson07.set_index('Word', inplace=True)
return stevenson07
def load_warriner13():
warriner13 = pd.read_csv(cs.warriner13, sep=',')
warriner13=warriner13[['Word','V.Mean.Sum', 'A.Mean.Sum', 'D.Mean.Sum']]
warriner13.columns=heads_vad
warriner13.set_index('Word',inplace=True)
#print(warriner13.head())
#print(warriner13.shape)
return warriner13
# #### SPANISH
def load_redondo07():
redondo07=pd.read_excel(cs.redondo07)
redondo07=redondo07[['S-Word','Val-Mn-All','Aro-Mn-All','Dom-Mn-All']]
redondo07.columns = heads_vad
redondo07.set_index('Word', inplace=True)
#print(redondo07.head())
#print(redondo07.shape)
return redondo07
def load_ferre16():
ferre16=pd.read_excel(cs.ferre16)
ferre16=ferre16[['Spanish_Word','Hap_Mean','Ang_Mean','Sad_Mean',
'Fear_Mean','Disg_Mean']]
ferre16.columns=heads_be5
ferre16.set_index('Word', inplace=True)
#print(ferre16.head())
#print(ferre16.shape)
return ferre16
# #### POLISH
def load_riegel15():
riegel15=pd.read_excel(cs.riegel15)
riegel15=riegel15[['NAWL_word','val_M_all','aro_M_all']]
riegel15.columns=['Word','Valence','Arousal']
riegel15['Valence']=scaleInRange(riegel15['Valence'],
oldmin=-3,
oldmax=3,
newmin=1,
newmax=9)
riegel15['Arousal']=scaleInRange(riegel15['Arousal'],
oldmin=1,
oldmax=5,
newmin=1,
newmax=9)
riegel15.set_index('Word', inplace=True)
return riegel15
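# scaleInRange (imported from framework.util) appears to be a plain linear rescaling,
# i.e. new = (x - oldmin) / (oldmax - oldmin) * (newmax - newmin) + newmin; under that
# assumption a NAWL valence of 0 on the [-3, 3] scale maps to 5 on the 1-9 ANEW-style scale:
# scaleInRange(0, oldmin=-3, oldmax=3, newmin=1, newmax=9) -> 5.0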
def load_wierzba15():
wierzba15 = pd.read_excel(cs.wierzba15)
wierzba15=wierzba15[['NAWL_word', 'hap_M_all', 'ang_M_all', 'sad_M_all',
'fea_M_all', 'dis_M_all']]
wierzba15.columns=heads_be5
wierzba15.set_index('Word', inplace=True)
## rescaling basic emotions
## Scaling
for cat in ['Joy', 'Anger', 'Sadness', 'Fear', 'Disgust']:
wierzba15[cat] = [scaleInRange(x=x, oldmin=1.,
oldmax=7., newmin=1., newmax=5.)
for x in wierzba15[cat]]
# print(wierzba15.head())
# print(wierzba15.shape)
return wierzba15
def load_imbir16():
imbir16 = pd.read_excel(cs.imbir16)
imbir16 = imbir16[['polish word', 'Valence_M', 'arousal_M', 'dominance_M']]
imbir16.columns=heads_vad
imbir16.set_index('Word', inplace=True)
# print(imbir16.head())
# print(imbir16.shape)
return imbir16
# ### GERMAN
def load_schmidtke14(lower_case=False):
schmidtke14=pd.read_excel(cs.schmidtke14)
# schmidtke14=schmidtke14[['Word','Valence','Arousal','Dominance']]
schmidtke14=schmidtke14[['G-word', 'VAL_Mean', 'ARO_Mean_(ANEW)', 'DOM_Mean']]
schmidtke14.columns=['Word', 'Valence', 'Arousal', 'Dominance']
# schmidtke14['Word']=schmidtke14['Word'].str.lower()
schmidtke14.set_index('Word', inplace=True)
if lower_case:
schmidtke14.index=schmidtke14.index.str.lower()
#schmidtke14=schmidtke14[~schmidtke14.index.duplicated(keep='first')]
schmidtke14=drop_duplicates(schmidtke14)
schmidtke14.Valence = [scaleInRange(x = x, oldmin = -3.,
oldmax = 3., newmin = 1., newmax=9.)
for x in schmidtke14.Valence]
# ### setting word column to lower case for compatibility with briesemeister11
# # print(schmidtke14.head())
# # print(schmidtke14.shape)
return schmidtke14
def load_briesemeister11():
briesemeister11=pd.read_excel(cs.briesemeister11)
briesemeister11=briesemeister11[['WORD_LOWER', 'HAP_MEAN', 'ANG_MEAN',
'SAD_MEAN', 'FEA_MEAN', 'DIS_MEAN']]
briesemeister11.columns=heads_be5
briesemeister11.set_index('Word', inplace=True)
briesemeister11=drop_duplicates(briesemeister11)
# print(briesemeister11.head())
# print(briesemeister11.shape)
return briesemeister11
def load_hinojosa16():
hinojosa16a=pd.read_excel(cs.hinojosa16a)
hinojosa16a=hinojosa16a[['Word','Val_Mn', 'Ar_Mn', 'Hap_Mn', 'Ang_Mn','Sad_Mn',
'Fear_Mn', 'Disg_Mn']]
hinojosa16a.columns=['Word', 'Valence', 'Arousal',
'Joy','Anger','Sadness','Fear','Disgust']
hinojosa16a.set_index('Word', inplace=True)
hinojosa16b=pd.read_excel(cs.hinojosa16b)
hinojosa16b=hinojosa16b[['Word', 'Dom_Mn']]
hinojosa16b.columns=['Word','Dominance']
hinojosa16b.set_index('Word', inplace=True)
hinojosa=hinojosa16a.join(hinojosa16b, how='inner')
hinojosa=hinojosa[['Valence', 'Arousal', 'Dominance',
'Joy', 'Anger', 'Sadness', 'Fear', 'Disgust']]
return hinojosa
def load_stadthagen16():
stadthagen16=pd.read_csv(cs.stadthagen16, encoding='cp1252')
stadthagen16=stadthagen16[['Word', 'ValenceMean', 'ArousalMean']]
stadthagen16.columns=['Word', 'Valence', 'Arousal']
stadthagen16.set_index('Word', inplace=True)
return stadthagen16
def load_stadthagen17():
'''
Full lexicon including BE5 and VA
'''
df=pd.read_csv(cs.stadthagen17, encoding='cp1252')
df=df[['Word', 'Valence_Mean', 'Arousal_Mean', 'Happiness_Mean',
'Anger_Mean', 'Sadness_Mean', 'Fear_Mean', 'Disgust_Mean']]
df.columns=['Word','Valence','Arousal', 'Joy','Anger', 'Sadness', 'Fear',
'Disgust']
df.set_index('Word', inplace=True)
return df
def load_kanske10():
with open(cs.kanske10, encoding='cp1252') as f:
kanske10=f.readlines()
# Filtering out the relevant portion of the provided file
kanske10=kanske10[7:1008]
# Creating data frame from string:
#https://stackoverflow.com/questions/22604564/how-to-create-a-pandas-dataframe-from-string
kanske10=pd.read_csv(StringIO(''.join(kanske10)), sep='\t')
kanske10=kanske10[['word', 'valence_mean','arousal_mean']]
kanske10.columns=['Word', 'Valence', 'Arousal']
kanske10['Word']=kanske10['Word'].str.lower()
kanske10.set_index('Word', inplace=True)
return kanske10
def load_vo09():
df=pd.read_csv(cs.vo09, sep=';')
df=df[['WORD_LOWER', 'EMO_MEAN','AROUSAL_MEAN']]
df.columns=['Word', 'Valence', 'Arousal']
df.set_index('Word', inplace=True)
# usecols='WORD_LOWER', 'EMO_MEAN','AROUSAL_MEAN', '')
df['Valence']=scaleInRange( x=df['Valence'],
oldmin=-3,
oldmax=3,
newmin=1,
newmax=9)
df['Arousal']=scaleInRange( x=df['Arousal'],
oldmin=1,
oldmax=5,
newmin=1,
newmax=9)
return df
def load_guasch15():
guasch15=pd.read_excel(cs.guasch15)
guasch15=guasch15[['Word','VAL_M', 'ARO_M']]
guasch15.columns=['Word', 'Valence', 'Arousal']
guasch15.set_index('Word', inplace=True)
return guasch15
def load_moors13():
# with open(cs.moors13) as f:
# moors13=f.readlines()
# moors13=moors13[1:]
# moors13=pd.read_excel(StringIO(''.join(moors13)))
moors13=pd.read_excel(cs.moors13, header=1)
moors13=moors13[['Words', 'M V', 'M A', 'M P']]
moors13.columns=heads_vad
moors13.set_index('Word', inplace=True)
# print(moors13)
return moors13
def load_montefinese14():
montefinese14=pd.read_excel(cs.montefinese14, header=1)
montefinese14=montefinese14[['Ita_Word', 'M_Val', 'M_Aro', 'M_Dom']]
montefinese14.columns=heads_vad
montefinese14.set_index('Word', inplace=True)
return montefinese14
def load_soares12():
soares12=pd.read_excel(cs.soares12, sheetname=1)
soares12=soares12[['EP-Word', 'Val-M', 'Arou-M', 'Dom-M']]
soares12.columns=heads_vad
soares12.set_index('Word', inplace=True)
return soares12
def load_sianipar16():
sianipar16=pd.read_excel(cs.sianipar16)
sianipar16=sianipar16[['Words (Indonesian)', 'ALL_Valence_Mean', 'ALL_Arousal_Mean', 'ALL_Dominance_Mean']]
sianipar16.columns=heads_vad
sianipar16.set_index('Word', inplace=True)
#sianipar16=sianipar16[~sianipar16.index.duplicated(keep='first')]
sianipar16=drop_duplicates(sianipar16)
return sianipar16
def load_yu16():
'''
<NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., … <NAME>.
(2016). Building Chinese Affective Resources in Valence-Arousal Dimensions.
In Proceedings of NAACL-2016.
'''
yu16=pd.read_csv(cs.yu16)
yu16=yu16[['Word', 'Valence_Mean', 'Arousal_Mean']]
yu16.columns=heads_vad[:-1]
yu16.set_index('Word', inplace=True)
return yu16
def load_yu16_ialp_train_test():
train=pd.read_csv(cs.yu16)
train=train[['No.', 'Word', 'Valence_Mean', 'Arousal_Mean']]
train.columns=['id', 'Word', 'Valence', 'Arousal']
# train.set_index('id', inplace=True)
test=train.copy()
train=train.loc[train.id.isin(range(1,1654))]
test=test.loc[test.id.isin(range(1654,2150))]
def __format__(df):
return df[['Word', 'Valence', 'Arousal']].set_index('Word')
test=__format__(test)
train=__format__(train)
return train,test
def load_yao16():
'''
<NAME>., <NAME>., <NAME>., & <NAME>. (2016). Norms of valence, arousal,
concreteness, familiarity, imageability, and context availability for
1,100 Chinese words. Behavior Research Methods.
'''
with open(cs.yao16) as f:
yao=f.readlines()
yao16= | pd.DataFrame(columns=['Word','Valence','Arousal']) | pandas.DataFrame |
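# A hedged sketch of how the rest of load_yao16 might fill the empty frame above; the
# exact file layout (a header row followed by whitespace-separated word/valence/arousal
# columns) is an assumption rather than something taken from the original norms file.
# rows = [line.split()[:3] for line in yao[1:] if line.strip()]
# yao16 = pd.DataFrame(rows, columns=['Word', 'Valence', 'Arousal'])
# yao16[['Valence', 'Arousal']] = yao16[['Valence', 'Arousal']].astype(float)
# yao16.set_index('Word', inplace=True)
# return yao16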
"""
A denoiser tries to cancel noise. (also water is wet)
"""
__docformat__ = "google"
from scipy.spatial.distance import cdist
import numpy as np
import pandas as pd
from nmoo.wrapped_problem import WrappedProblem
class KNNAvg(WrappedProblem):
"""
Implementation of the KNN-Avg algorithm of Klikovits and Arcaini.
See also:
`Original KNN-Avg repository
<https://github.com/ERTOMMSD/QUATIC2021-KNN-Averaging>`_
`KNN-Avg QUATIC2021 paper
<https://raw.githubusercontent.com/ERATOMMSD/QUATIC2021-KNN-Averaging/main/KlikovitsArcaini-KNNAvgForNoisyNoisyMOO.pdf>`_
"""
_distance_weight_mode: str
_max_distance: float
_n_neighbors: int
def __init__(
self,
problem: WrappedProblem,
max_distance: float,
n_neighbors: int = 5, # KNN
distance_weight_type: str = "uniform",
*,
name: str = "knn_avg",
):
"""
Constructor.
Args:
problem (:obj:`WrappedProblem`): Noisy problem. For memory
optimization reasons, this should be a `WrappedProblem` as
opposed to a pymoo `Problem`.
distance_weight_type (str): Either "squared" or "uniform".
max_distance (float): Distance cutoff.
n_neighbors (int): Number of neighbors to consider (KNN).
name (str): An optional name for this problem. This will be used
when creating history dump files. Defaults to `knn_avg`.
"""
super().__init__(problem, name=name)
if distance_weight_type not in ["squared", "uniform"]:
raise ValueError(
"Parameter distance_weight_type must be either 'squared' or "
"'uniform'."
)
self._distance_weight_mode = distance_weight_type
if max_distance < 0.0:
raise ValueError(
"Parameter max_distance must either be 'None' or >= 0."
)
self._max_distance = max_distance
if n_neighbors <= 0:
raise ValueError("Parameter n_neighbors must be >= 1.")
self._n_neighbors = n_neighbors
def _evaluate(self, x, out, *args, **kwargs):
"""
Applies the KNN-Avg algorithm to the wrapped (noisy) problem's output.
"""
self._problem._evaluate(x, out, *args, **kwargs)
for i, sol in enumerate(x):
# Store the solution history into a dataframe (note that we are
# using the wrapped problem's history to make sure this dataframe
# is never empty).
x_hist = | pd.DataFrame(self._problem._history["X"]) | pandas.DataFrame |
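# A hedged sketch (not the authors' exact implementation) of how the KNN-Avg step could
# continue from x_hist: keep the neighbors of `sol` within max_distance, take the k
# nearest, and average their recorded objective values, weighted either uniformly or
# by inverse squared distance, writing the result back into out["F"][i].
#
# distances = cdist(sol.reshape(1, -1), x_hist.values)[0]
# nearest = np.argsort(distances)[: self._n_neighbors]
# nearest = nearest[distances[nearest] <= self._max_distance]
# if len(nearest) > 0:
#     if self._distance_weight_mode == "squared":
#         weights = 1.0 / (distances[nearest] ** 2 + 1e-12)
#     else:
#         weights = np.ones(len(nearest))
#     f_hist = np.asarray(self._problem._history["F"])[nearest]
#     out["F"][i] = np.average(f_hist, axis=0, weights=weights)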
#!/usr/bin/env python
# coding: utf-8
# In this notebook we try to practice all the classification algorithms that we learned in this course.
#
# We load a dataset using the Pandas library, apply the following algorithms, and find the best one for this specific dataset using accuracy evaluation methods.
#
# Let's first load the required libraries:
# In[1]:
import itertools
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter
import pandas as pd
import numpy as np
import matplotlib.ticker as ticker
from sklearn import preprocessing
get_ipython().run_line_magic('matplotlib', 'inline')
# ### About dataset
# This dataset is about past loans. The __Loan_train.csv__ data set includes details of 346 customers whose loans are already paid off or defaulted. It includes the following fields:
#
# | Field | Description |
# |----------------|---------------------------------------------------------------------------------------|
# | Loan_status    | Whether a loan is paid off or in collection                                             |
# | Principal      | Basic principal loan amount at origination                                              |
# | Terms | Origination terms which can be weekly (7 days), biweekly, and monthly payoff schedule |
# | Effective_date | When the loan got originated and took effects |
# | Due_date | Since it’s one-time payoff schedule, each loan has one single due date |
# | Age | Age of applicant |
# | Education | Education of applicant |
# | Gender | The gender of applicant |
# Let's download the dataset
# In[2]:
get_ipython().system('wget -O loan_train.csv https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/loan_train.csv')
# ### Load Data From CSV File
# In[3]:
df = pd.read_csv('loan_train.csv')
df.head()
# In[4]:
df.shape
# ### Convert to date time object
# In[5]:
df['due_date'] = pd.to_datetime(df['due_date'])
df['effective_date'] = pd.to_datetime(df['effective_date'])
df.head()
# # Data visualization and pre-processing
#
#
# Let’s see how many of each class is in our data set
# In[6]:
df['loan_status'].value_counts()
# 260 people have paid off the loan on time while 86 have gone into collection
#
# Let's plot some columns to understand the data better:
# In[7]:
# notice: installing seaborn might takes a few minutes
get_ipython().system('conda install -c anaconda seaborn -y')
# In[8]:
import seaborn as sns
bins = np.linspace(df.Principal.min(), df.Principal.max(), 10)
g = sns.FacetGrid(df, col="Gender", hue="loan_status", palette="Set1", col_wrap=2)
g.map(plt.hist, 'Principal', bins=bins, ec="k")
g.axes[-1].legend()
plt.show()
# In[9]:
bins = np.linspace(df.age.min(), df.age.max(), 10)
g = sns.FacetGrid(df, col="Gender", hue="loan_status", palette="Set1", col_wrap=2)
g.map(plt.hist, 'age', bins=bins, ec="k")
g.axes[-1].legend()
plt.show()
# # Pre-processing: Feature selection/extraction
# ### Let's look at the day of the week people get the loan
# In[10]:
df['dayofweek'] = df['effective_date'].dt.dayofweek
bins = np.linspace(df.dayofweek.min(), df.dayofweek.max(), 10)
g = sns.FacetGrid(df, col="Gender", hue="loan_status", palette="Set1", col_wrap=2)
g.map(plt.hist, 'dayofweek', bins=bins, ec="k")
g.axes[-1].legend()
plt.show()
# We see that people who get the loan at the end of the week tend not to pay it off, so let's use feature binarization to flag days greater than 3 (Friday through Sunday) as the weekend
# In[11]:
df['weekend'] = df['dayofweek'].apply(lambda x: 1 if (x>3) else 0)
df.head()
# ## Convert Categorical features to numerical values
# Let's look at gender:
# In[12]:
df.groupby(['Gender'])['loan_status'].value_counts(normalize=True)
# 86% of females pay off their loans while only 73% of males pay off their loans
#
# Let's convert male to 0 and female to 1:
#
# In[13]:
df['Gender'].replace(to_replace=['male','female'], value=[0,1],inplace=True)
df.head()
# ## One Hot Encoding
# #### How about education?
# In[14]:
df.groupby(['education'])['loan_status'].value_counts(normalize=True)
# #### Features before One Hot Encoding
# In[15]:
df[['Principal','terms','age','Gender','education']].head()
# #### Use the one hot encoding technique to convert categorical variables to binary variables and append them to the feature Data Frame
# In[16]:
Feature = df[['Principal','terms','age','Gender','weekend']]
Feature = pd.concat([Feature,pd.get_dummies(df['education'])], axis=1)
Feature.drop(['Master or Above'], axis = 1,inplace=True)
Feature.head()
# ### Feature selection
# Let's define our feature set, X:
# In[17]:
X = Feature
X[0:5]
# What are our labels?
# In[18]:
y = df['loan_status'].values
y[0:5]
# ## Normalize Data
# Data standardization gives the data zero mean and unit variance (technically this should be done after the train/test split; see the sketch below)
# In[19]:
X= preprocessing.StandardScaler().fit(X).transform(X)
X[0:5]
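# Note: a leakage-free alternative (sketch only, reusing the unscaled `Feature`
# frame and `y` defined above) fits the scaler on the training split and only
# transforms the test split with it:
from sklearn.model_selection import train_test_split
X_tr, X_te, y_tr, y_te = train_test_split(Feature, y, test_size=0.2, random_state=4)
leak_free_scaler = preprocessing.StandardScaler().fit(X_tr)
X_tr_scaled = leak_free_scaler.transform(X_tr)
X_te_scaled = leak_free_scaler.transform(X_te)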
# # Classification
# We use the test set to report the accuracy of the model.
# We are going to use the following algorithms:
# - K Nearest Neighbor(KNN)
# - Decision Tree
# - Support Vector Machine
# - Logistic Regression
#
# # K Nearest Neighbor(KNN)
#
# In[20]:
# Import the library for splitting the data into train and test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=4)
print ('the shape of the features train set:', X_train.shape,', the shape of the target train set:' ,y_train.shape)
print ('the shape of the feature test set:', X_test.shape,', the shape of the target test set:',y_test.shape)
# In[21]:
# Importing the KNN model
from sklearn.neighbors import KNeighborsClassifier
# Importing the metrics
from sklearn import metrics
Ks = 10
mean_acc = np.zeros((Ks-1))
std_acc = np.zeros((Ks-1))
ConfustionMx = [];
for n in range(1,Ks):
#Train Model and Predict
neigh = KNeighborsClassifier(n_neighbors = n).fit(X_train,y_train)
yhat=neigh.predict(X_test)
mean_acc[n-1] = metrics.accuracy_score(y_test, yhat)
std_acc[n-1]=np.std(yhat==y_test)/np.sqrt(yhat.shape[0])
mean_acc
# In[22]:
plt.plot(range(1,Ks),mean_acc,'g')
plt.fill_between(range(1,Ks),mean_acc - 1 * std_acc,mean_acc + 1 * std_acc, alpha=0.10)
plt.legend(('Accuracy ', '+/- 1xstd'))
plt.ylabel('Accuracy ')
plt.xlabel('Number of Neighbors (K)')
plt.tight_layout()
plt.show()
# In[23]:
print( "The best accuracy was with", mean_acc.max(), "with k=", mean_acc.argmax()+1)
# In[24]:
# Set value of k as 7
k = 7
# Train Model and Predict
loanknn = KNeighborsClassifier(n_neighbors = k).fit(X_train,y_train)
loanknn
# In[25]:
yhat = loanknn.predict(X_test)
yhat[0:5]
# In[26]:
print("Train set Accuracy: ", metrics.accuracy_score(y_train, loanknn.predict(X_train)))
print("Test set Accuracy: ", metrics.accuracy_score(y_test, yhat))
# In[27]:
from sklearn.metrics import classification_report
print (classification_report(y_test, yhat))
# In[28]:
from sklearn.metrics import f1_score
f1_score(y_test, yhat, average='weighted')
# In[29]:
from sklearn.metrics import jaccard_similarity_score
jaccard_similarity_score(y_test, yhat)
# # Decision Tree
# In[30]:
# Import the decision tree model
from sklearn.tree import DecisionTreeClassifier
# In[31]:
md = 10
mean_acc = np.zeros((md-1))
std_acc = np.zeros((md-1))
ConfustionMx = [];
for n in range(1,md):
#Train Model and Predict
loant = DecisionTreeClassifier(criterion="entropy", max_depth = n).fit(X_train,y_train)
yhat=loant.predict(X_test)
mean_acc[n-1] = metrics.accuracy_score(y_test, yhat)
std_acc[n-1]=np.std(yhat==y_test)/np.sqrt(yhat.shape[0])
mean_acc
# In[32]:
plt.plot(range(1,md),mean_acc,'r')
plt.fill_between(range(1,md),mean_acc - 1 * std_acc,mean_acc + 1 * std_acc, alpha=0.10)
plt.legend(('Accuracy ', '+/- 1xstd'))
plt.ylabel('Accuracy ')
plt.xlabel('Number of Max Depth')
plt.tight_layout()
plt.show()
# In[33]:
loandt = DecisionTreeClassifier(criterion="entropy", max_depth = 6)
# Check the default parameters
loandt
# Train the Decision tree model
loandt.fit(X_train,y_train)
# Predict using the model
yhat= loandt.predict(X_test)
# In[34]:
print("Train set Accuracy: ", metrics.accuracy_score(y_train, loandt.predict(X_train)))
print("Test set Accuracy: ", metrics.accuracy_score(y_test, yhat))
# In[35]:
print (classification_report(y_test, yhat))
# In[36]:
# Calculate the F1 score
f1_score(y_test, yhat, average='weighted')
# In[37]:
# Calculate the jaccard index
jaccard_similarity_score(y_test, yhat)
# In[38]:
#!conda install -c conda-forge pydotplus -y
#!conda install -c conda-forge python-graphviz -y
# In[39]:
'''from sklearn.externals.six import StringIO
import pydotplus
import matplotlib.image as mpimg
from sklearn import tree
%matplotlib inline '''
# In[40]:
'''dot_data = StringIO()
filename = "loantree.png"
featureNames = Feature.columns
targetNames = df['loan_status'].unique().tolist()
out=tree.export_graphviz(loandt,feature_names=featureNames, out_file=dot_data, class_names= np.unique(y_train), filled=True, special_characters=True,rotate=False)
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
graph.write_png(filename)
img = mpimg.imread(filename)
plt.figure(figsize=(100, 200))
plt.imshow(img,interpolation='nearest')'''
# # Support Vector Machine
# In[41]:
# Import the library for SVM Classifier
from sklearn import svm
# Build a SVM Classifier with a Radial base Function Kernel
loansvm1 = svm.SVC(kernel='rbf').fit(X_train, y_train)
yhat1 = loansvm1.predict(X_test)
svm_r = metrics.accuracy_score(y_test, yhat1)
# Build a SVM Classifier with a Linear Kernel
loansvm2 = svm.SVC(kernel='linear').fit(X_train, y_train)
yhat2 = loansvm2.predict(X_test)
svm_l = metrics.accuracy_score(y_test, yhat2)
# Build a SVM Classifier with a Polynomial Kernel
loansvm3 = svm.SVC(kernel='poly').fit(X_train, y_train)
yhat3 = loansvm3.predict(X_test)
svm_p = metrics.accuracy_score(y_test, yhat3)
# Build a SVM Classifier with a Sigmoid Kernel
loansvm4 = svm.SVC(kernel='sigmoid').fit(X_train, y_train)
yhat4 = loansvm4.predict(X_test)
svm_s = metrics.accuracy_score(y_test, yhat4)
print(svm_r,svm_l,svm_p,svm_s)
# In[42]:
# Find if labels are missing in the SVM models
print("The label missing in the first model with rbf kernel",set(y_test) - set(yhat1))
print("The label missing in the second model with linear",set(y_test) - set(yhat2))
print("The label missing in the third model with polynomial kernel",set(y_test) - set(yhat3))
print("The label missing in the fourth model with sigmoid kernel",set(y_test) - set(yhat4))
# In[43]:
# Build and train the SVM Classifier with an rbf kernel
loansvm = svm.SVC(kernel='rbf').fit(X_train, y_train)
# In[44]:
yhat = loansvm.predict(X_test)
yhat [0:5]
# In[45]:
print("Train set Accuracy: ", metrics.accuracy_score(y_train, loansvm.predict(X_train)))
print("Test set Accuracy: ", metrics.accuracy_score(y_test, yhat))
# In[46]:
print (classification_report(y_test, yhat))
# In[47]:
# Calculate the f1 score
f1_score(y_test, yhat, average='weighted')
# In[48]:
#Calculate the Jaccard index
jaccard_similarity_score(y_test, yhat)
# # Logistic Regression
# In[49]:
# Import the library for Logistic regression
from sklearn.linear_model import LogisticRegression
# Build and train the logistic regression model
loanlr1 = LogisticRegression(C=0.01, solver='liblinear').fit(X_train,y_train)
yhat1 = loanlr1.predict(X_test)
loanlr_a1 = metrics.accuracy_score(y_test, yhat1)
# Build and train the logistic regression model
loanlr2 = LogisticRegression(C=0.01, solver='sag').fit(X_train,y_train)
yhat2 = loanlr2.predict(X_test)
loanlr_a2 = metrics.accuracy_score(y_test, yhat2)
# Build and train the logistic regression model
loanlr3 = LogisticRegression(C=0.01, solver='saga').fit(X_train,y_train)
yhat3 = loanlr3.predict(X_test)
loanlr_a3 = metrics.accuracy_score(y_test, yhat3)
# Build and train the logistic regression model
loanlr4 = LogisticRegression(C=0.01, solver='newton-cg').fit(X_train,y_train)
yhat4 = loanlr4.predict(X_test)
loanlr_a4 = metrics.accuracy_score(y_test, yhat4)
# Build and train the logistic regression model
loanlr5 = LogisticRegression(C=0.01, solver='lbfgs').fit(X_train,y_train)
yhat5 = loanlr5.predict(X_test)
loanlr_a5 = metrics.accuracy_score(y_test, yhat5)
print('LR model with liblinear solver',loanlr_a1)
print('LR model with sag solver',loanlr_a2)
print('LR model with saga solver',loanlr_a3)
print('LR model with newton-cg solver',loanlr_a4)
print('LR model with lbfgs solver',loanlr_a5)
# In[50]:
# Find if labels are missing in the models
print("The label missing in the LR model with liblinear solver",set(y_test) - set(yhat1))
print("The label missing in the LR model with sag solver",set(y_test) - set(yhat2))
print("The label missing in the LR model with saga solver",set(y_test) - set(yhat3))
print("The label missing in the LR model with newton-cg solver",set(y_test) - set(yhat4))
print("The label missing in the LR model with lbfgs solver",set(y_test) - set(yhat5))
# In[51]:
loanlr = LogisticRegression(C=0.01, solver='liblinear').fit(X_train,y_train)
yhat = loanlr.predict(X_test)
# In[52]:
print("Train set Accuracy: ", metrics.accuracy_score(y_train, loanlr.predict(X_train)))
print("Test set Accuracy: ", metrics.accuracy_score(y_test, yhat))
# In[53]:
print (classification_report(y_test, yhat))
# In[54]:
# Calculate the f1 score
f1_score(y_test, yhat, average='weighted')
# In[55]:
#Calculate the Jaccard index
jaccard_similarity_score(y_test, yhat)
# # Model Evaluation using Test set
# In[56]:
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import f1_score
from sklearn.metrics import log_loss
# First, download and load the test set:
# In[57]:
get_ipython().system('wget -O loan_test.csv https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/loan_test.csv')
# ### Load Test set for evaluation
# In[58]:
test_df = pd.read_csv('loan_test.csv')
test_df.head()
# In[59]:
# shape of the test data set
test_df.shape
# In[60]:
# Count of the loan status
test_df['loan_status'].value_counts()
# In[61]:
df = test_df
df['due_date'] = | pd.to_datetime(df['due_date']) | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 15 11:36:48 2021
@author: nb137
"""
# Data Setup
import os
import sys
# Hide my folder tree for publication online, but this is me ensuring I've updated my data
os.system(r'github COVID_GH_FOLDER') # Can't update from terminal, but this will remind me to pull data if i haven't
response = input("Was GitHub updated? y/n: ")
if response == 'n':
sys.exit()
elif response =='y':
pass
else:
print('Did not input y or n, stopping')
sys.exit()
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import numpy as np
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
from covid_support import combine, cols, states, state_info
today = pd.datetime.today().strftime('%Y-%m-%d')
latest_data = combine.index.max().strftime('%Y-%m-%d')
print("Dashboard updated:\t{} \nLatest Data:\t\t{}".format(today,latest_data))
import matplotlib
matplotlib.rc('font',size=8)
# Select AZ, CA, ME, NY OR,TX WA, WI
state_info = state_info.iloc[[0,1,7,8,9,12,14,15,16]]
# New Cases Per Capita
# TODO: Plot days ago to show indication of when cases may not be reported yet
plt.figure(figsize=(12,6))
for i,r in state_info.iterrows():
new_cases_per_pop = combine['Confirmed',r['State']].diff().rolling(7).mean()/(r['Pop']/1e5)
plt.plot(combine.index, new_cases_per_pop,linewidth=3, label=r['State'])
plt.title('New Cases/100k/day 7d rolling')
plt.yscale('log')
plt.ylabel('log scale cases/100k')
plt.xlabel('Date')
plt.ylim(1e0,15e1)
plt.legend()
plt.grid(which='both')
# Testing Rates
combine = combine['6/1/2020':]
for sub in [state_info]:
td = combine.index[-1]
plt.figure(figsize=(12,6))
plt.subplot(2,1,1)
for i,r in sub.iterrows():
new_cases_per_pop = combine['Confirmed',r['State']].diff().rolling(7).mean()/(r['Pop']/1e5)
new_tests_per_pop = combine['Testing_Rate',r['State']].diff().rolling(7).mean()
new_tests_per_pop[new_tests_per_pop <0] = np.nan
perc_test_pos = new_cases_per_pop/new_tests_per_pop
plt.plot(combine.index, perc_test_pos, label=r['State'])
plt.plot(( | pd.datetime(2020,6,8) | pandas.datetime |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 2 11:39:57 2018
@author: <NAME>
@Contains : Pre-processing functions
"""
import pandas as pd
import numpy as np
import json
def mapprice(v):
if pd.isnull(v):
return [ np.nan]
try:
vv = v.split('-')
p0 = vv[0].strip()[1:].replace(",","")
return [float(p0)]
except ValueError:
return [np.nan]
def mapnumber_available_in_stock(v):
if pd.isnull(v):
return np.NaN ,np.NaN
try:
vv = v.split('\xa0')
return int(vv[0]),str(vv[1])
except ValueError:
return np.NaN ,np.NaN
def mapnumber_of_reviews(v):
if pd.isnull(v):
return np.nan
try:
vv = v.replace(",","")
return int(vv)
except ValueError:
return np.nan
def mapaverage_review_rating(v):
if pd.isnull(v):
return np.nan
try:
vv = float(v.split('out')[0].strip())
return vv
except ValueError:
return np.nan
# read json data of seller
def myseller(v):
if pd.isnull(v):
return 0
try:
        vv = v.replace('=>', ':')
        # the seller field stores Ruby-style hashes; once '=>' is converted to
        # ':' the string is valid JSON and can be parsed directly
        dict_data = json.loads(vv)
        return dict_data
except ValueError:
return 0
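# Illustrative behaviour of the parsers above (input strings are hypothetical):
#   mapprice("$1,029.99 - $1,499.00")               -> [1029.99]
#   mapnumber_available_in_stock("5\xa0new")        -> (5, 'new')
#   mapnumber_of_reviews("1,234")                   -> 1234
#   mapaverage_review_rating("4.5 out of 5 stars")  -> 4.5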
#split category
def mapcategories(srs):
if | pd.isnull(srs) | pandas.isnull |
from __future__ import division
import pytest
import numpy as np
from pandas import (Interval, IntervalIndex, Index, isna,
interval_range, Timestamp, Timedelta,
compat)
from pandas._libs.interval import IntervalTree
from pandas.tests.indexes.common import Base
import pandas.util.testing as tm
import pandas as pd
class TestIntervalIndex(Base):
_holder = IntervalIndex
def setup_method(self, method):
self.index = IntervalIndex.from_arrays([0, 1], [1, 2])
self.index_with_nan = IntervalIndex.from_tuples(
[(0, 1), np.nan, (1, 2)])
self.indices = dict(intervalIndex=tm.makeIntervalIndex(10))
def create_index(self):
return IntervalIndex.from_breaks(np.arange(10))
def test_constructors(self):
expected = self.index
actual = IntervalIndex.from_breaks(np.arange(3), closed='right')
assert expected.equals(actual)
alternate = IntervalIndex.from_breaks(np.arange(3), closed='left')
assert not expected.equals(alternate)
actual = IntervalIndex.from_intervals([Interval(0, 1), Interval(1, 2)])
assert expected.equals(actual)
actual = IntervalIndex([Interval(0, 1), Interval(1, 2)])
assert expected.equals(actual)
actual = IntervalIndex.from_arrays(np.arange(2), np.arange(2) + 1,
closed='right')
assert expected.equals(actual)
actual = Index([Interval(0, 1), Interval(1, 2)])
assert isinstance(actual, IntervalIndex)
assert expected.equals(actual)
actual = Index(expected)
assert isinstance(actual, IntervalIndex)
assert expected.equals(actual)
def test_constructors_other(self):
# all-nan
result = IntervalIndex.from_intervals([np.nan])
expected = np.array([np.nan], dtype=object)
tm.assert_numpy_array_equal(result.values, expected)
# empty
result = IntervalIndex.from_intervals([])
expected = np.array([], dtype=object)
tm.assert_numpy_array_equal(result.values, expected)
def test_constructors_errors(self):
# scalar
with pytest.raises(TypeError):
IntervalIndex(5)
# not an interval
with pytest.raises(TypeError):
IntervalIndex([0, 1])
with pytest.raises(TypeError):
IntervalIndex.from_intervals([0, 1])
# invalid closed
with pytest.raises(ValueError):
IntervalIndex.from_arrays([0, 1], [1, 2], closed='invalid')
# mismatched closed
with pytest.raises(ValueError):
IntervalIndex.from_intervals([Interval(0, 1),
Interval(1, 2, closed='left')])
with pytest.raises(ValueError):
IntervalIndex.from_arrays([0, 10], [3, 5])
with pytest.raises(ValueError):
Index([Interval(0, 1), Interval(2, 3, closed='left')])
# no point in nesting periods in an IntervalIndex
with pytest.raises(ValueError):
IntervalIndex.from_breaks(
pd.period_range('2000-01-01', periods=3))
def test_constructors_datetimelike(self):
# DTI / TDI
for idx in [pd.date_range('20130101', periods=5),
| pd.timedelta_range('1 day', periods=5) | pandas.timedelta_range |
# File System
import os
import json
from pathlib import Path
from zipfile import ZipFile
import pickle
import gc
import numpy as np
import pandas as pd
from sympy.geometry import *
DATA_PATH = '../data/' # Point this constant to the location of your data archive files
EXPECTED_DATASETS = {'Colorado': [
'county_total_population.Colorado.zip',
'covid_county.Colorado.zip',
'neon_2d_wind.Colorado.zip',
'neon_barometric_pressure.Colorado.zip',
'neon_single_asp_air_temperature.Colorado.zip'
]}
counties = {'Colorado': ['Boulder', 'Grand', 'Larimer', 'Logan', 'Weld', 'Yuma']}
def get_datasets(state='Colorado'):
# Returns dataframes in order: control, covidWind, covidPressure, covidTemperature
extract(state)
create_pickles(state)
return pd.read_pickle(f'../data/control.{state}.pkl'), | pd.read_pickle(f'../data/covidWind.{state}.pkl') | pandas.read_pickle |
import sys
import pandas as pd
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
    """
    Load Data function

    Arguments:
        messages_filepath -> path to messages csv file
        categories_filepath -> path to categories csv file
    Output:
        df -> Loaded data as a Pandas DataFrame
    """
messages = pd.read_csv(messages_filepath)
categories = | pd.read_csv(categories_filepath) | pandas.read_csv |
import os
import numpy as np
import pandas as pd
from sklearn import linear_model
def allign_alleles(df):
"""Look for reversed alleles and inverts the z-score for one of them.
Here, we take advantage of numpy's vectorized functions for performance.
"""
d = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
a = [] # array of alleles
for colname in ['A1_ref', 'A2_ref', 'A1_gen', 'A2_gen', 'A1_y', 'A2_y']:
tmp = np.empty(len(df[colname]), dtype=int)
for k, v in d.items():
tmp[np.array(df[colname]) == k] = v
a.append(tmp)
matched_alleles_gen = (((a[0] == a[2]) & (a[1] == a[3])) |
((a[0] == 3 - a[2]) & (a[1] == 3 - a[3])))
reversed_alleles_gen = (((a[0] == a[3]) & (a[1] == a[2])) |
((a[0] == 3 - a[3]) & (a[1] == 3 - a[2])))
matched_alleles_y = (((a[0] == a[4]) & (a[1] == a[5])) |
((a[0] == 3 - a[4]) & (a[1] == 3 - a[5])))
reversed_alleles_y = (((a[0] == a[5]) & (a[1] == a[4])) |
((a[0] == 3 - a[5]) & (a[1] == 3 - a[4])))
df['Z_y'] *= -2 * reversed_alleles_y + 1
df['reversed'] = reversed_alleles_gen
df = df[((matched_alleles_y|reversed_alleles_y)&(matched_alleles_gen|reversed_alleles_gen))]
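# Illustrative, self-contained check of the encoding trick used above; it is
# not part of the original pipeline and is never called.
def _demo_allele_encoding():
    """With d = {'A': 0, 'C': 1, 'G': 2, 'T': 3}, the strand complement of an
    allele code a is 3 - a (A<->T, C<->G), which is what the `3 - a[...]`
    comparisons in allign_alleles rely on."""
    d = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
    complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}
    for base, code in d.items():
        assert d[complement[base]] == 3 - code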
def get_files(file_name, chr):
if '@' in file_name:
valid_files = []
if chr is None:
for i in range(1, 23):
cur_file = file_name.replace('@', str(i))
if os.path.isfile(cur_file):
valid_files.append(cur_file)
else:
raise ValueError('No file matching {} for chr {}'.format(
file_name, i))
else:
cur_file = file_name.replace('@', chr)
if os.path.isfile(cur_file):
valid_files.append(cur_file)
else:
raise ValueError('No file matching {} for chr {}'.format(
file_name, chr))
return valid_files
else:
if os.path.isfile(file_name):
return [file_name]
else:
            raise ValueError('No files matching {}'.format(file_name))
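# Illustrative use of the '@' chromosome placeholder handled by get_files
# (the paths are hypothetical):
#   get_files('data/chr@.bim', None)  -> ['data/chr1.bim', ..., 'data/chr22.bim']
#   get_files('data/chr@.bim', '7')   -> ['data/chr7.bim']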
def prep(bfile, genotype, sumstats2, N2, phenotype, covariates, chr, start, end):
bim_files = get_files(bfile + '.bim', chr)
genotype_files = get_files(genotype + '.bim', chr)
# read in bim files
bims = [pd.read_csv(f,
header=None,
names=['CHR', 'SNP', 'CM', 'BP', 'A1', 'A2'],
delim_whitespace=True) for f in bim_files]
bim = pd.concat(bims, ignore_index=True)
genotype_bims = [pd.read_csv(f,
header=None,
names=['CHR', 'SNP', 'CM', 'BP', 'A1', 'A2'],
delim_whitespace=True) for f in genotype_files]
genotype_bim = pd.concat(genotype_bims, ignore_index=True)
if chr is not None:
if start is None:
start = 0
if end is None:
end = float('inf')
genotype_bim = genotype_bim[np.logical_and(np.logical_and(genotype_bim['CHR']==chr, genotype_bim['BP']<=end), genotype_bim['BP']>=start)].reset_index(drop=True)
bim = bim[np.logical_and(np.logical_and(bim['CHR']==chr, bim['BP']<=end), bim['BP']>=start)].reset_index(drop=True)
summary_stats = pd.read_csv(sumstats2, delim_whitespace=True)
# rename cols
bim.rename(columns={'CHR': 'CHR_ref', 'CM': 'CM_ref', 'BP':'BP_ref', 'A1': 'A1_ref', 'A2': 'A2_ref'}, inplace=True)
genotype_bim.rename(columns={'CHR': 'CHR_gen', 'CM': 'CM_gen', 'BP':'BP_gen', 'A1': 'A1_gen', 'A2': 'A2_gen'}, inplace=True)
summary_stats.rename(columns={'A1': 'A1_y', 'A2': 'A2_y', 'N': 'N_y', 'Z': 'Z_y'}, inplace=True)
# take overlap between output and ref genotype files
df = pd.merge(bim, genotype_bim, on=['SNP']).merge(summary_stats, on=['SNP'])
# flip sign of z-score for allele reversals
allign_alleles(df)
df = df.drop_duplicates(subset='SNP', keep=False)
if N2 is not None:
N2 = N2
else:
N2 = summary_stats['N_y'].max()
df.rename(columns={'CHR_ref':'CHR'}, inplace=True)
ggr_df = pd.read_csv(phenotype, header=None, names=['IID', 'Phenotype'], delim_whitespace=True, usecols=[1, 2])
fam_files = get_files(genotype + '.fam', chr)
for i in range(len(fam_files)):
fam_data = pd.read_csv(fam_files[i], header=None, names=['IID'], delim_whitespace=True, usecols=[1])
ggr_df = pd.merge(fam_data, ggr_df, on=['IID'])
ii = ggr_df['Phenotype'] != 9
pheno_avg = np.mean(ggr_df['Phenotype'][ii])
ggr_df['Phenotype'][np.logical_not(ii)] = pheno_avg
if covariates is not None:
covariates_df = pd.read_csv(covariates, header=None, delim_whitespace=True)
covariates_df = covariates_df.iloc[:, 1:]
covariates_df = covariates_df.rename(columns={1:'IID'})
regression_df = pd.merge(ggr_df[['IID']], covariates_df, on=['IID'])
ggr_df = | pd.merge(ggr_df, covariates_df[['IID']], on=['IID']) | pandas.merge |
#!/usr/bin/env python
# coding: utf-8
"""
analysis and optimization
"""
import logging
import numpy as np
import pandas as pd
import seaborn as sns
import sklearn
from matplotlib import pyplot as plt
from scipy import stats
from scipy.optimize import differential_evolution
from sklearn.ensemble import RandomForestRegressor # pylint: disable=ungrouped-imports
from sklearn.linear_model import LinearRegression # pylint: disable=ungrouped-imports
from stldeli import config
# pylint: disable=too-many-statements,too-many-locals
def main():
"""
main
:return:
"""
data = pd.read_csv("data.csv").clean_column_names()
y_df = data[data.columns.intersection(['tension_strength'])]
x_df = data[config.important_features]
logging.info(x_df.columns)
strength_regressor_rf = RandomForestRegressor()
strength_regressor_rf.fit(x_df, y_df.values.reshape(-1))
feature_importance = pd.concat([pd.Series(strength_regressor_rf.feature_importances_, name='importance'),
pd.Series(x_df.columns, name='feature')
], axis=1
).sort_values('importance')
_, axis = plt.subplots(nrows=1, ncols=1, figsize=(8, 8))
feature_importance.plot.barh(x='feature',
y='importance',
color='grey',
legend=False,
ax=axis
)
axis.set_xlabel('relative importance')
axis.set_ylabel('')
axis.set_title('Tensile Strength')
strength_predicted = pd.Series(strength_regressor_rf.predict(x_df), name='predicted')
strength_actual = pd.Series(y_df.values.reshape(-1), name='actual')
strength_residual = strength_actual - strength_predicted
pd.concat([strength_predicted,
strength_actual]
, axis=1
).plot.scatter(x='actual', y='predicted', title='Tensile Strength (MPa)')
strength_residual.name = 'residual'
pd.concat([strength_predicted,
strength_residual]
, axis=1
).plot.scatter(x='predicted', y='residual', title='Tensile Strength (MPa)')
np.sqrt(sklearn.metrics.mean_squared_error(y_true=strength_actual,
y_pred=strength_predicted
)
)
stats.probplot(strength_residual, dist="norm", plot=plt)
metadata = pd.read_csv('metadata.csv')
metadata.info()
newx = metadata[config.strength_controllable_parameters]
tensile = strength_regressor_rf.predict(newx)
tensile_series = pd.Series(tensile, name='tensile_strength_predicted')
tensile_series.plot.hist()
metadata_enriched = metadata.join(tensile_series)
filament = metadata['filament used '].str.strip(' ').str.split(' ', expand=True).rename(
columns={0: 'filament_used_mm',
1: 'filament_used_cm3'
}
)
metadata_enriched['filament_used_mm'] = filament['filament_used_mm'] \
.str.replace('mm', '') \
.apply(float)
metadata_enriched['filament_used_cm3'] = filament['filament_used_cm3'] \
.str.strip('()') \
.str.replace('cm3', '') \
.apply(float)
metadata_enriched['infill extrusion width (mm)'] = metadata['infill extrusion width '] \
.str.replace('mm', '') \
.apply(float)
figure = sns.lmplot(
x='filament_used_cm3',
y='tensile_strength_predicted',
hue='--infill-every-layers',
# col='--fill-density',
# row='--layer-height',
data=metadata_enriched
)
figure.axes[0, 0].set_xlabel('filament used ($cm^3$)')
figure.axes[0, 0].set_ylabel('tensile strength (MPa)')
y_df = metadata_enriched[metadata_enriched.columns.intersection(['filament_used_cm3'])]
x_df = metadata_enriched[config.filament_controllable_parameters]
y_df.plot.hist(legend=False)
filament_regressor_rf = RandomForestRegressor()
filament_regressor_rf.fit(x_df, y_df.values.reshape(-1))
# filament_regressor_linear.fit(x_numeric.dropna(1),y)
feature_importance = pd.concat([pd.Series(filament_regressor_rf.feature_importances_, name='importance'),
pd.Series(x_df.columns, name='feature')
], axis=1
).sort_values('importance')
feature_importance.dropna()
_, axis = plt.subplots(nrows=1, ncols=1, figsize=(8, 8))
feature_importance.dropna()[-10:].plot.barh(
x='feature',
y='importance',
color='grey',
legend=False,
ax=axis
)
axis.set_xlabel('relative importance')
axis.set_ylabel('')
axis.set_title('Filament Usage')
filament_predicted = pd.Series(filament_regressor_rf.predict(x_df), name='predicted')
filament_actual = pd.Series(y_df.values.reshape(-1), name='actual')
filament_residual = filament_actual - filament_predicted
filament_residual.name = 'residual'
pd.concat([filament_predicted, filament_actual], axis=1).plot.scatter(x='actual', y='predicted',
title='Filament Used ($cm^3$)')
np.sqrt(sklearn.metrics.mean_squared_error(y_pred=filament_predicted, y_true=filament_actual))
stats.probplot(filament_residual.sample(n=100), dist="norm", plot=plt)
average_filament = metadata_enriched['filament_used_cm3'].mean()
average_strength = metadata_enriched['tensile_strength_predicted'].mean()
def cost_function(input_array, progress_df):
"""
layer_height,
fill_density,
infill_every_layers,
wall_thickness,
nozzle_temperature
"""
(layer_height,
fill_density,
infill_every_layers,
wall_thickness,
nozzle_temperature) = input_array
x_strength = pd.Series(
{'layer_height': layer_height,
'fill_density': fill_density,
'wall_thickness': wall_thickness,
'nozzle_temperature': nozzle_temperature}
)
x_filament = pd.Series(
{'layer_height': layer_height,
'fill_density': fill_density,
'infill_every_layers': infill_every_layers,
})
_strength = strength_regressor_rf.predict(x_strength.values.reshape(1, -1))
_filament = filament_regressor_rf.predict(x_filament.values.reshape(1, -1))
cost = 0.5 * _filament / average_filament - 0.5 * _strength / average_strength
        # one label per value: the five input parameters plus the cost
        # (no iteration counter is available inside this objective function)
        row = pd.Series(np.append(input_array, cost),  # pylint: disable=used-before-assignment
                        index=('layer_height',
                               'fill_density',
                               'infill_every_layers',
                               'wall_thickness',
                               'nozzle_temperature',
                               'cost'
                               ))
progress_df.append(row)
return cost
cost_function(np.array([0.02, 10.00, 1, 1, 200]), | pd.DataFrame() | pandas.DataFrame |
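    # Sketch of the optimisation step this module builds towards, using scipy's
    # differential_evolution imported above. The parameter bounds below are
    # illustrative assumptions, not values taken from the dataset.
    progress = pd.DataFrame()
    bounds = [
        (0.02, 0.2),  # layer_height (mm)
        (10, 90),     # fill_density (%)
        (1, 5),       # infill_every_layers
        (1, 10),      # wall_thickness
        (190, 240),   # nozzle_temperature (C)
    ]
    result = differential_evolution(
        lambda p: float(cost_function(p, progress)), bounds, maxiter=10, seed=0
    )
    print("optimised parameters:", result.x, "cost:", result.fun)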
"""
Script to check results files for compliance
with a dietary restriction at the recipe level.
To run, first download food.csv from https://www.foodb.ca/downloads.
"""
import os
import re
import csv
import pandas as pd
from apply_tag import apply_tag
def get_ing_list():
"""Get ingredient list from FooDB."""
ing_list = | pd.read_csv('/sample_data/food.csv') | pandas.read_csv |
# Steven 05/17/2020
# clustering model design
from time import time
import pandas as pd
import numpy as np
# from sklearn.decomposition import PCA
# from sklearn.cluster import AgglomerativeClustering
from sklearn.cluster import KMeans
# from sklearn.cluster import DBSCAN
# from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import MinMaxScaler # StandardScaler
# from sklearn.model_selection import StratifiedKFold
# from sklearn.model_selection import GridSearchCV
from sklearn.metrics import silhouette_score
# from sklearn.metrics import make_scorer
# from sklearn.metrics import calinski_harabasz_score
from sklearn.metrics import davies_bouldin_score
from sklearn.neighbors._nearest_centroid import NearestCentroid
import matplotlib.pyplot as plt
def createKMeans(k=2):
model = KMeans(n_clusters=k, random_state=0)
# print(model,k)
# print('k=',k)
return model
def s_score_silhouette(estimator, X):
labels_ = estimator.fit_predict(X)
score = 0
# print(X.shape)
# print(X)
actualK = len(list(set(labels_)))
if actualK > 1:
# print(labels_)
score = silhouette_score(X, labels_, metric='euclidean') # 'euclidean'
# score = calinski_harabasz_score(X, labels_)
# score = davies_bouldin_score(X, labels_)
# print(score)
return score
def squaredDistances(a, b):
return np.sum((a - b)**2)
def calculateSSE2(data, labels, centroids):
print(data.shape, type(data), centroids.shape)
sse = 0
for i, ct in enumerate(centroids):
# print('i,ct=',i,ct)
samples = []
for k in range(data.shape[0]):
label = labels[k]
sample = data.iloc[k, :].values
# print('sample,label=',i,sample,label)
if label == i:
# sse += squaredDistances(sample,ct)
samples.append(sample)
sse += squaredDistances(samples, ct)
return sse
def calculateSSE(data, labels, centroids):
# print(data.shape,type(data),centroids.shape,labels.shape)
# print('labels=',labels)
# data = data.to_numpy()#if dataframe
sse = 0
for i, ct in enumerate(centroids):
# print('i,ct=',i,ct)
# samples = data.iloc[np.where(labels == i)[0], :].values
samples = data[np.where(labels == i)[0], :]
sse += squaredDistances(samples, ct)
return sse
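# Illustrative, self-contained sanity check (never called): for a fitted KMeans
# model, calculateSSE should agree with sklearn's inertia_ attribute.
def _demo_sse_matches_inertia():
    rng = np.random.RandomState(0)
    demo = rng.rand(100, 3)
    km = KMeans(n_clusters=3, random_state=0).fit(demo)
    sse = calculateSSE(demo, km.labels_, km.cluster_centers_)
    assert np.isclose(sse, km.inertia_)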
def calculateDaviesBouldin(data, labels):
return davies_bouldin_score(data, labels)
def getModelMeasure(data, labels):
sse, dbValue, csm = 0, 0, 0
# k = len(np.unique(labels))
k = len(list(set(labels)))
if k > 1:
# print(data.shape,model.labels_)
# csm = silhouette_score(data, labels, metric='euclidean')
clf = NearestCentroid()
clf.fit(data, labels)
# print(clf.centroids_)
sse = calculateSSE(data, labels, clf.centroids_)
# dbValue = calculateDaviesBouldin(data,labels)
sse = round(sse, 4)
csm = round(csm, 4)
dbValue = round(dbValue, 4)
print('SSE=', sse, 'DB=', dbValue, 'CSM=', csm, 'clusters=', k)
# print("Silhouette Coefficient: %0.3f" % csm)
# print('clusters=',k)
return sse, dbValue, csm, k
def preprocessingData(data, N=5):
scaler = MinMaxScaler() # StandardScaler() #
# scaler.fit(data)
# data = scaler.transform(data)
data = scaler.fit_transform(data)
return data
def KMeansModelTrain(dataName, data, N=10):
data = preprocessingData(data)
df = pd.DataFrame()
print('datashape=', data.shape)
columns = ['Dataset', 'Algorithm', 'K', 'tt(s)', 'SSE', 'DB', 'CSM']
for i in range(2, N, 1): # 2 N 1
model = createKMeans(i)
modelName = 'K-Means'
t = time()
model.fit(data)
tt = round(time() - t, 4)
print("\ndataSet:%s model:%s iter i=%d run in %.2fs" % (dataName, modelName, i, tt))
sse, dbValue, csm, k = getModelMeasure(data, model.labels_)
dbName = dataName + str(data.shape)
line = | pd.DataFrame([[dbName, modelName, k, tt, sse, dbValue, csm]], columns=columns) | pandas.DataFrame |
import pandas as pd
from .helpers import pandas_to_json
from .consts import profile_col_names
pd.set_option('display.max_columns', 40)
import sys
# data processing
def process_data(inf_dict, friends_dict, profile_dict, lk_dict, final_data_dict):
# convert dicts to pandas dfs
inf_df = pd.DataFrame(inf_dict, index=[0])
friends_df = pd.DataFrame(friends_dict)
profile_df = | pd.DataFrame(profile_dict, index=[0]) | pandas.DataFrame |
""" test the scalar Timedelta """
from datetime import timedelta
import numpy as np
import pytest
from pandas._libs import lib
from pandas._libs.tslibs import (
NaT,
iNaT,
)
import pandas as pd
from pandas import (
Timedelta,
TimedeltaIndex,
offsets,
to_timedelta,
)
import pandas._testing as tm
class TestTimedeltaUnaryOps:
def test_unary_ops(self):
td = Timedelta(10, unit="d")
# __neg__, __pos__
assert -td == Timedelta(-10, unit="d")
assert -td == Timedelta("-10d")
assert +td == Timedelta(10, unit="d")
# __abs__, __abs__(__neg__)
assert abs(td) == td
assert abs(-td) == td
assert abs(-td) == Timedelta("10d")
class TestTimedeltas:
@pytest.mark.parametrize(
"unit, value, expected",
[
("us", 9.999, 9999),
("ms", 9.999999, 9999999),
("s", 9.999999999, 9999999999),
],
)
def test_rounding_on_int_unit_construction(self, unit, value, expected):
# GH 12690
result = Timedelta(value, unit=unit)
assert result.value == expected
result = Timedelta(str(value) + unit)
assert result.value == expected
def test_total_seconds_scalar(self):
# see gh-10939
rng = Timedelta("1 days, 10:11:12.100123456")
expt = 1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456.0 / 1e9
tm.assert_almost_equal(rng.total_seconds(), expt)
rng = Timedelta(np.nan)
assert np.isnan(rng.total_seconds())
def test_conversion(self):
for td in [Timedelta(10, unit="d"), Timedelta("1 days, 10:11:12.012345")]:
pydt = td.to_pytimedelta()
assert td == Timedelta(pydt)
assert td == pydt
assert isinstance(pydt, timedelta) and not isinstance(pydt, Timedelta)
assert td == np.timedelta64(td.value, "ns")
td64 = td.to_timedelta64()
assert td64 == np.timedelta64(td.value, "ns")
assert td == td64
assert isinstance(td64, np.timedelta64)
# this is NOT equal and cannot be roundtripped (because of the nanos)
td = Timedelta("1 days, 10:11:12.012345678")
assert td != td.to_pytimedelta()
def test_fields(self):
def check(value):
# that we are int
assert isinstance(value, int)
# compat to datetime.timedelta
rng = to_timedelta("1 days, 10:11:12")
assert rng.days == 1
assert rng.seconds == 10 * 3600 + 11 * 60 + 12
assert rng.microseconds == 0
assert rng.nanoseconds == 0
msg = "'Timedelta' object has no attribute '{}'"
with pytest.raises(AttributeError, match=msg.format("hours")):
rng.hours
with pytest.raises(AttributeError, match=msg.format("minutes")):
rng.minutes
with pytest.raises(AttributeError, match=msg.format("milliseconds")):
rng.milliseconds
# GH 10050
check(rng.days)
check(rng.seconds)
check(rng.microseconds)
check(rng.nanoseconds)
td = Timedelta("-1 days, 10:11:12")
assert abs(td) == Timedelta("13:48:48")
assert str(td) == "-1 days +10:11:12"
assert -td == Timedelta("0 days 13:48:48")
assert -Timedelta("-1 days, 10:11:12").value == 49728000000000
assert Timedelta("-1 days, 10:11:12").value == -49728000000000
rng = to_timedelta("-1 days, 10:11:12.100123456")
assert rng.days == -1
assert rng.seconds == 10 * 3600 + 11 * 60 + 12
assert rng.microseconds == 100 * 1000 + 123
assert rng.nanoseconds == 456
msg = "'Timedelta' object has no attribute '{}'"
with pytest.raises(AttributeError, match=msg.format("hours")):
rng.hours
with pytest.raises(AttributeError, match=msg.format("minutes")):
rng.minutes
with pytest.raises(AttributeError, match=msg.format("milliseconds")):
rng.milliseconds
# components
tup = to_timedelta(-1, "us").components
assert tup.days == -1
assert tup.hours == 23
assert tup.minutes == 59
assert tup.seconds == 59
assert tup.milliseconds == 999
assert tup.microseconds == 999
assert tup.nanoseconds == 0
# GH 10050
check(tup.days)
check(tup.hours)
check(tup.minutes)
check(tup.seconds)
check(tup.milliseconds)
check(tup.microseconds)
check(tup.nanoseconds)
tup = Timedelta("-1 days 1 us").components
assert tup.days == -2
assert tup.hours == 23
assert tup.minutes == 59
assert tup.seconds == 59
assert tup.milliseconds == 999
assert tup.microseconds == 999
assert tup.nanoseconds == 0
def test_iso_conversion(self):
# GH #21877
expected = Timedelta(1, unit="s")
assert to_timedelta("P0DT0H0M1S") == expected
def test_nat_converters(self):
result = to_timedelta("nat").to_numpy()
assert result.dtype.kind == "M"
assert result.astype("int64") == iNaT
result = to_timedelta("nan").to_numpy()
assert result.dtype.kind == "M"
assert result.astype("int64") == iNaT
@pytest.mark.parametrize(
"unit, np_unit",
[(value, "W") for value in ["W", "w"]]
+ [(value, "D") for value in ["D", "d", "days", "day", "Days", "Day"]]
+ [
(value, "m")
for value in [
"m",
"minute",
"min",
"minutes",
"t",
"Minute",
"Min",
"Minutes",
"T",
]
]
+ [
(value, "s")
for value in [
"s",
"seconds",
"sec",
"second",
"S",
"Seconds",
"Sec",
"Second",
]
]
+ [
(value, "ms")
for value in [
"ms",
"milliseconds",
"millisecond",
"milli",
"millis",
"l",
"MS",
"Milliseconds",
"Millisecond",
"Milli",
"Millis",
"L",
]
]
+ [
(value, "us")
for value in [
"us",
"microseconds",
"microsecond",
"micro",
"micros",
"u",
"US",
"Microseconds",
"Microsecond",
"Micro",
"Micros",
"U",
]
]
+ [
(value, "ns")
for value in [
"ns",
"nanoseconds",
"nanosecond",
"nano",
"nanos",
"n",
"NS",
"Nanoseconds",
"Nanosecond",
"Nano",
"Nanos",
"N",
]
],
)
@pytest.mark.parametrize("wrapper", [np.array, list, pd.Index])
def test_unit_parser(self, unit, np_unit, wrapper):
# validate all units, GH 6855, GH 21762
# array-likes
expected = TimedeltaIndex(
[np.timedelta64(i, np_unit) for i in np.arange(5).tolist()]
)
result = to_timedelta(wrapper(range(5)), unit=unit)
tm.assert_index_equal(result, expected)
result = TimedeltaIndex(wrapper(range(5)), unit=unit)
tm.assert_index_equal(result, expected)
str_repr = [f"{x}{unit}" for x in np.arange(5)]
result = to_timedelta(wrapper(str_repr))
tm.assert_index_equal(result, expected)
result = to_timedelta(wrapper(str_repr))
tm.assert_index_equal(result, expected)
# scalar
expected = Timedelta(np.timedelta64(2, np_unit).astype("timedelta64[ns]"))
result = to_timedelta(2, unit=unit)
assert result == expected
result = Timedelta(2, unit=unit)
assert result == expected
result = to_timedelta(f"2{unit}")
assert result == expected
result = Timedelta(f"2{unit}")
assert result == expected
@pytest.mark.parametrize("unit", ["Y", "y", "M"])
def test_unit_m_y_raises(self, unit):
msg = "Units 'M', 'Y', and 'y' are no longer supported"
with pytest.raises(ValueError, match=msg):
Timedelta(10, unit)
with pytest.raises(ValueError, match=msg):
to_timedelta(10, unit)
with pytest.raises(ValueError, match=msg):
to_timedelta([1, 2], unit)
def test_numeric_conversions(self):
assert Timedelta(0) == np.timedelta64(0, "ns")
assert Timedelta(10) == np.timedelta64(10, "ns")
assert Timedelta(10, unit="ns") == np.timedelta64(10, "ns")
assert Timedelta(10, unit="us") == np.timedelta64(10, "us")
assert Timedelta(10, unit="ms") == np.timedelta64(10, "ms")
assert Timedelta(10, unit="s") == np.timedelta64(10, "s")
assert Timedelta(10, unit="d") == np.timedelta64(10, "D")
def test_timedelta_conversions(self):
assert Timedelta(timedelta(seconds=1)) == np.timedelta64(1, "s").astype(
"m8[ns]"
)
assert Timedelta(timedelta(microseconds=1)) == np.timedelta64(1, "us").astype(
"m8[ns]"
)
assert Timedelta(timedelta(days=1)) == np.timedelta64(1, "D").astype("m8[ns]")
def test_to_numpy_alias(self):
# GH 24653: alias .to_numpy() for scalars
td = Timedelta("10m7s")
assert td.to_timedelta64() == td.to_numpy()
@pytest.mark.parametrize(
"freq,s1,s2",
[
# This first case has s1, s2 being the same as t1,t2 below
(
"N",
Timedelta("1 days 02:34:56.789123456"),
| Timedelta("-1 days 02:34:56.789123456") | pandas.Timedelta |
from datetime import datetime
startTime = datetime.now()
import json
import glob
import numpy as np
import sklearn
import pandas as pd
from io import StringIO
import tensorflow as tf
import tensorflowjs as tfjs
from tensorflow import keras
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
EPOCHS = 12
airports = dict()
airlines = dict()
def build_model():
model = keras.Sequential([
keras.layers.Dense(64, activation=tf.nn.relu, input_shape=(X_train.shape[1],)),
keras.layers.Dense(64, activation=tf.nn.relu),
keras.layers.Dense(64, activation=tf.nn.relu),
keras.layers.Dense(1)
])
optimizer = tf.train.RMSPropOptimizer(0.001)
model.compile(loss='mse', optimizer=optimizer, metrics=['mae'])
return model
# load the carriers lookup
this_data = pd.read_csv('flight-data/carriers.csv', skipinitialspace=True, low_memory=False)
airlines_lookup = this_data.set_index('Code')['Description'].to_dict()
# we are going to create twelve different models, one for each month
# of the year. The importing, cleaning, and training all take place
# twelve times and result in twelve different model json files.
for i in range (1, 13):
print('Starting month {}'.format(i))
print('===============================================================\nLoading data for month {}... '.format(i), end=' ')
path = 'flight-data/*/*_' + str(i) + '.csv'
month_data = glob.glob(path)
loaded_data = []
for path in month_data:
this_data = | pd.read_csv(path, skipinitialspace=True, low_memory=False) | pandas.read_csv |
# Automated Antibody Search
# <NAME> - UBC March 2020
# This code uses selenium webdriver to automate search for antibodies based on marker genes found via scRNA-seq
# Input: a dataframe containing uniquely upregulated marker genes for a given cluster
# REQUIREMENTS:
# Download selenium in terminal using the command: pip install selenium
# Download Chrome driver via https://chromedriver.chromium.org/downloads
# Input the file path for chromedriver on LINE 42
###############SET UP#################
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import pandas as pd
# create a data frame with info to save regarding astrocyte protein expression
co = ["Gene", "FullName", "Region", "Antibody", "Neurons", "Glia", "pct1", "pct2"]
df = pd.DataFrame(columns=co)
# get user input for which file to open
toOpen = input("Enter the file path of your marker gene csv file: ")
# save whole df
markers = pd.read_csv(toOpen)
# save all gene names to iterate through
allgenes = markers['Unnamed: 0']
# save the pct values
pct1 = markers['pct.1']
pct2 = markers['pct.2']
# get user input for what to name the new file
toSave = input("Enter the name you would like to save the output file as (be sure to include .csv at the end): ")
# this line will open up your controlled chrome window
# input the filepath of your chromedriver here
driver = webdriver.Chrome("/Users/kaitlinsullivan/opt/miniconda3/lib/python3.8/site-packages/chromedriver")
# open protein atlas (or any website of choice)
driver.get("https://www.proteinatlas.org/")
##############SEARCH FOR GENES IN LIST#################
# loop through each each and search for protein expression in the hippocampus and cortex
for z in range(len(allgenes)):
cur_gene = allgenes[z]
cur_pct1 = pct1[z]
cur_pct2 = pct2[z]
# locate the search bar and submit button
# if you are looking to use this code for a different website:
# go to that website and press option+command+U - this will bring up the source code so you can find elements
search_bar = driver.find_element_by_id("searchQuery")
sub_button = driver.find_element_by_id("searchButton")
# search gene names via imported marker gene list
search_bar.clear()
search_bar.send_keys(cur_gene)
sub_button.click()
# get the full gene name
# this searches for the second HTML element of the table that pops up after searching a given gene name
# since the gene of choice may not always be at the top of the list, best to iterate through list
cur_names = driver.find_elements_by_class_name("tda")
# if there are no genes found skip to next iteration
if(len(cur_names)==0):
continue
# iterate through list of gene names to find gene in question
i = 0
exist = True
while(cur_names[i].text != cur_gene):
i+=1
if(i>=len(cur_names)):
exist = False
break
# if the search results do not bring up the gene name, skip to next iteration
if(exist != True):
continue
#save the full name of the gene
cur_name = cur_names[i+1].text
##############GET BRAIN TISSUE INFO###################
# select brain tissue protein expression by finding correct gene HTML element
gene_xpath = "//a[contains(@href, '-" + cur_gene + "/brain')]"
brain = driver.find_elements_by_xpath(gene_xpath)
# if there is no brain tissue info skip
if(len(brain)==0):
cur_region = "NA"
cur_a = "NA"
cur_neur = "NA"
cur_glia = "NA"
# write empty line for gene
cur = pd.DataFrame([[cur_gene, cur_name, cur_region, cur_ab, cur_neur, cur_glia, cur_pct1, cur_pct2]], columns=co)
        df = df.append(cur, ignore_index=True)
continue
brain[0].click()
# select tissue menu
tissue = driver.find_element_by_class_name("tissue_menu_opener")
tissue.click()
# save all tissue types in a list
tissue = driver.find_elements_by_xpath("//div/span/a/span")
# convert from web elements to strings
tissue_text = [None]*len(tissue)
for y in range(len(tissue)):
tissue_text[y] = tissue[y].text
##############CEREBRAL CORTEX###################
# save cortical info
if("CEREBRAL CORTEX" in tissue_text):
# click on the hippocampus
cortex = driver.find_elements_by_xpath("//a[contains(@href, '/cerebral+cortex')]")
cortex[0].click()
cur_region = "Cortex"
# save antibody names as a list
ab_h = driver.find_elements_by_class_name("head_nohex")
##### if there are no antibodies
if(len(ab_h)==0):
cur_ab = "NA"
cur_neur = "NA"
cur_glia = "NA"
# write empty line for gene in HC
cur = pd.DataFrame([[cur_gene, cur_name, cur_region, cur_ab, cur_neur, cur_glia, cur_pct1, cur_pct2]], columns=co)
df = df.append(cur, ignore_index=True)
##### if there are antibodies
else:
# find parent element of table holding values
parent = driver.find_element_by_xpath("//table[@class='border dark']")
# find the values
cells = parent.find_elements_by_tag_name("td")
for x in range(len(ab_h)):
# manually check that there are 4 variables as assumed
if(len(cells)<4):
# save line to table
cur = | pd.DataFrame([[cur_gene, cur_name, cur_region, cur_ab, "check", "check", cur_pct1, cur_pct2]], columns=co) | pandas.DataFrame |
import pandas as pd
import logging
import electricitylci.model_config as config
formatter = logging.Formatter(
"%(levelname)s:%(filename)s:%(funcName)s:%(message)s"
)
logging.basicConfig(
format="%(levelname)s:%(filename)s:%(funcName)s:%(message)s",
level=logging.INFO,
)
logger = logging.getLogger("electricitylci")
def get_generation_process_df(regions=None, **kwargs):
"""
Create a dataframe of emissions from power generation by fuel type in each
region. kwargs would include the upstream emissions dataframe (upstream_df) if
upstream emissions are being included.
Parameters
----------
regions : str, optional
Regions to include in the analysis (the default is None, which uses the value
read from a settings YAML file). Other options include "eGRID", "NERC", "BA",
"US", "FERC", and "EIA"
Returns
-------
DataFrame
Each row represents information about a single emission from a fuel category
in a single region. Columns are:
'Subregion', 'FuelCategory', 'FlowName', 'FlowUUID', 'Compartment',
'Year', 'Source', 'Unit', 'ElementaryFlowPrimeContext',
'TechnologicalCorrelation', 'TemporalCorrelation', 'DataCollection',
'Emission_factor', 'Reliability_Score', 'GeographicalCorrelation',
'GeomMean', 'GeomSD', 'Maximum', 'Minimum'
"""
from electricitylci.generation import create_generation_process_df
from electricitylci.combinator import concat_clean_upstream_and_plant
if config.model_specs.include_renewable_generation is True:
generation_process_df=get_gen_plus_netl()
else:
generation_process_df = create_generation_process_df()
if config.model_specs.include_netl_water is True:
import electricitylci.plant_water_use as water
water_df = water.generate_plant_water_use(config.model_specs.eia_gen_year)
generation_process_df=concat_clean_upstream_and_plant(generation_process_df,water_df)
if config.model_specs.include_upstream_processes is True:
try:
upstream_df = kwargs['upstream_df']
upstream_dict = kwargs['upstream_dict']
except KeyError:
print(
"A kwarg named 'upstream_dict' must be included if include_upstream_processes"
"is True"
)
# upstream_dict = write_upstream_process_database_to_dict(
# upstream_df
# )
# upstream_dict = write_upstream_dicts_to_jsonld(upstream_dict)
combined_df, canadian_gen = combine_upstream_and_gen_df(
generation_process_df, upstream_df
)
gen_plus_fuels = add_fuels_to_gen(
generation_process_df, upstream_df, canadian_gen, upstream_dict
)
else:
import electricitylci.import_impacts as import_impacts
canadian_gen_df = import_impacts.generate_canadian_mixes(generation_process_df)
generation_process_df = pd.concat([generation_process_df, canadian_gen_df], ignore_index=True)
gen_plus_fuels=generation_process_df
    # This change has been made to accommodate the new method of generating
    # consumption mixes for FERC regions. They now pull BAs to provide
    # a more accurate inventory. The tradeoff here is that it's no longer possible
    # to make a FERC region generation mix and also provide the consumption mix.
    # Or it could be possible but would require running through aggregate twice.
# generation_process_df = aggregate_gen(
# gen_plus_fuels, subregion=regions
# )
if regions is None:
regions = config.model_specs.regional_aggregation
if regions in ["BA","FERC","US"]:
generation_process_df = aggregate_gen(
gen_plus_fuels, subregion="BA"
)
else:
generation_process_df = aggregate_gen(
gen_plus_fuels, subregion=regions
)
return generation_process_df
def get_generation_mix_process_df(regions=None):
"""
Create a dataframe of generation mixes by fuel type in each subregion.
This function imports and uses the parameter 'replace_egrid' and
'gen_mix_from_model_generation_data' from model_config.py. If 'replace_egrid'
is true or the specified 'regions' is true, then the generation mix will
come from EIA 923 data. If 'replace_egrid' is false then the generation
mix will either come from the eGRID reference data
('gen_mix_from_model_generation_data' is false) or from the generation data
from this model ('gen_mix_from_model_generation_data' is true).
Parameters
----------
regions : str, optional
Which regions to include (the default is 'all', which includes all eGRID
subregions)
Returns
-------
DataFrame
Sample output:
>>> all_gen_mix_db.head()
Subregion FuelCategory Electricity NERC Generation_Ratio
0 AKGD COAL 5.582922e+05 ASCC 0.116814
22 AKGD OIL 3.355753e+05 ASCC 0.070214
48 AKGD GAS 3.157474e+06 ASCC 0.660651
90 AKGD HYDRO 5.477350e+05 ASCC 0.114605
114 AKGD BIOMASS 5.616577e+04 ASCC 0.011752
"""
from electricitylci.egrid_filter import (
electricity_for_selected_egrid_facilities,
)
from electricitylci.generation_mix import (
create_generation_mix_process_df_from_model_generation_data,
create_generation_mix_process_df_from_egrid_ref_data,
)
from electricitylci.eia923_generation import build_generation_data
if regions is None:
regions = config.model_specs.regional_aggregation
if config.model_specs.replace_egrid or regions in ["BA","FERC","US"]:
# assert regions == 'BA' or regions == 'NERC', 'Regions must be BA or NERC'
if regions in ["BA","FERC","US"] and not config.model_specs.replace_egrid:
logger.info(
f"EIA923 generation data is being used for the generation mix "
f"despite replace_egrid = False. The reference eGrid electricity "
f"data cannot be reorgnznied to match BA or FERC regions. For "
f"the US region, the function for generating US mixes does not "
f"support aggregating to the US."
)
print("EIA923 generation data is used when replacing eGRID")
generation_data = build_generation_data(
generation_years=[config.model_specs.eia_gen_year]
)
generation_mix_process_df = create_generation_mix_process_df_from_model_generation_data(
generation_data, regions
)
else:
if config.model_specs.gen_mix_from_model_generation_data:
generation_mix_process_df = create_generation_mix_process_df_from_model_generation_data(
electricity_for_selected_egrid_facilities, regions
)
else:
generation_mix_process_df = create_generation_mix_process_df_from_egrid_ref_data(
regions
)
return generation_mix_process_df
def write_generation_process_database_to_dict(gen_database, regions=None):
"""
Create olca formatted dictionaries of individual processes
Parameters
----------
gen_database : DataFrame
Each row represents information about a single emission from a fuel category
in a single region.
regions : str, optional
        Not currently used (the default is 'all').
Returns
-------
dict
A dictionary of dictionaries, each of which contains information about
emissions from a single fuel type in a single region.
"""
from electricitylci.generation import olcaschema_genprocess
if regions is None:
regions = config.model_specs.regional_aggregation
gen_dict = olcaschema_genprocess(gen_database, subregion=regions)
return gen_dict
def write_generation_mix_database_to_dict(
genmix_database, gen_dict, regions=None
):
from electricitylci.generation_mix import olcaschema_genmix
if regions is None:
regions = config.model_specs.regional_aggregation
if regions in ["FERC","US","BA"]:
genmix_dict = olcaschema_genmix(
genmix_database, gen_dict, subregion="BA"
)
else:
genmix_dict = olcaschema_genmix(
genmix_database, gen_dict, subregion=regions
)
return genmix_dict
def write_surplus_pool_and_consumption_mix_dict():
"""
Create olca formatted dictionaries for the consumption mix as calculated by
    consumption_mix.py. Note that this function directly pulls the dataframes,
converts the data into the dictionary and then returns the dictionary.
Returns
-------
dictionary
The surplus pool and consumption mixes for the various regions.
"""
from electricitylci.consumption_mix import surplus_dict
from electricitylci.consumption_mix import consumption_dict
surplus_pool_and_con_mix = {**surplus_dict, **consumption_dict}
return surplus_pool_and_con_mix
def write_distribution_dict():
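    """
    Return the distribution mix dictionary from electricitylci.distribution.
    """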
from electricitylci.distribution import distribution_mix_dictionary
return distribution_mix_dictionary()
def write_process_dicts_to_jsonld(*process_dicts):
"""
Send one or more process dictionaries to be written to json-ld
"""
from electricitylci.olca_jsonld_writer import write
all_process_dicts = dict()
for d in process_dicts:
all_process_dicts = {**all_process_dicts, **d}
olca_dicts = write(all_process_dicts, config.model_specs.namestr)
return olca_dicts
def get_upstream_process_df(eia_gen_year):
"""
Automatically load all of the upstream emissions data from the various
modules. Will return a dataframe with upstream emissions from
coal, natural gas, petroleum, nuclear, and plant construction.
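
    Example
    -------
    A minimal sketch (assumes a configured model; elsewhere in this module
    the year passed in is config.model_specs.eia_gen_year):

    >>> upstream_df = get_upstream_process_df(config.model_specs.eia_gen_year)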
"""
import electricitylci.coal_upstream as coal
import electricitylci.natural_gas_upstream as ng
import electricitylci.petroleum_upstream as petro
import electricitylci.nuclear_upstream as nuke
import electricitylci.power_plant_construction as const
from electricitylci.combinator import concat_map_upstream_databases
print("Generating upstream inventories...")
coal_df = coal.generate_upstream_coal(eia_gen_year)
ng_df = ng.generate_upstream_ng(eia_gen_year)
petro_df = petro.generate_petroleum_upstream(eia_gen_year)
nuke_df = nuke.generate_upstream_nuc(eia_gen_year)
    const_df = const.generate_power_plant_construction(eia_gen_year)
    # coal and ng already conform to the mapping, so no additional mapping is needed
    upstream_df = concat_map_upstream_databases(
        eia_gen_year, petro_df, nuke_df, const_df
    )
    upstream_df = pd.concat(
        [upstream_df, coal_df, ng_df], sort=False, ignore_index=True
    )
return upstream_df
def write_upstream_process_database_to_dict(upstream_df):
"""
Convert the upstream dataframe generated by get_upstream_process_df to
dictionaries to be written to json-ld.
Parameters
----------
upstream_df : dataframe
        Combined dataframe as generated by get_upstream_process_df
Returns
-------
dictionary
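        Dictionaries of olca-formatted upstream unit processes, as produced
        by electricitylci.upstream_dict.olcaschema_genupstream_processes.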
"""
import electricitylci.upstream_dict as upd
print("Writing upstream processes to dictionaries")
upstream_dicts = upd.olcaschema_genupstream_processes(upstream_df)
return upstream_dicts
def write_upstream_dicts_to_jsonld(upstream_dicts):
"""
Write the upstream dictionary to jsonld.
Parameters
----------
upstream_dicts : dictionary
        The dictionary of upstream unit processes generated by
        electricitylci.write_upstream_process_database_to_dict.
"""
upstream_dicts = write_process_dicts_to_jsonld(upstream_dicts)
return upstream_dicts
def combine_upstream_and_gen_df(gen_df, upstream_df):
"""
Combine the generation and upstream dataframes into a single dataframe.
    The emissions represented here are the annual emissions for all power
plants. This dataframe would be suitable for further analysis.
Parameters
----------
gen_df : dataframe
The generator dataframe, generated by get_gen_plus_netl or
get_generation_process_df. Note that get_generation_process_df returns
two dataframes. The intention would be to send the second returned
dataframe (plant-level emissions) to this routine.
upstream_df : dataframe
The upstream dataframe, generated by get_upstream_process_df
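
    Returns
    -------
    tuple of dataframes
        The combined dataframe and the generated Canadian mixes.

    Example
    -------
    A minimal sketch (assumes `plant_df` and `upstream_df` were built as
    described above):

    >>> combined_df, canadian_gen = combine_upstream_and_gen_df(plant_df, upstream_df)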
"""
import electricitylci.combinator as combine
import electricitylci.import_impacts as import_impacts
print("Combining upstream and generation inventories")
combined_df = combine.concat_clean_upstream_and_plant(gen_df, upstream_df)
canadian_gen = import_impacts.generate_canadian_mixes(combined_df)
combined_df = pd.concat([combined_df, canadian_gen], ignore_index=True)
return combined_df, canadian_gen
def get_gen_plus_netl():
"""
This will combine the netl life cycle data for solar, solar thermal,
geothermal, wind, and hydro and will include impacts from construction, etc.
that would be omitted from the regular sources of emissions.
It then generates power plant emissions. The two different dataframes are
combined to provide a single dataframe representing annual emissions or
life cycle emissions apportioned over the appropriate number of years for
all reporting power plants.
Returns
-------
dataframe
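        Combined dataframe of the plant-level emissions and the NETL
        renewable/construction inventories described above.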
"""
import electricitylci.generation as gen
from electricitylci.combinator import (
concat_map_upstream_databases,
concat_clean_upstream_and_plant,
)
import electricitylci.geothermal as geo
import electricitylci.solar_upstream as solar
import electricitylci.wind_upstream as wind
import electricitylci.hydro_upstream as hydro
import electricitylci.solar_thermal_upstream as solartherm
eia_gen_year = config.model_specs.eia_gen_year
print(
"Generating inventories for geothermal, solar, wind, hydro, and solar thermal..."
)
geo_df = geo.generate_upstream_geo(eia_gen_year)
solar_df = solar.generate_upstream_solar(eia_gen_year)
wind_df = wind.generate_upstream_wind(eia_gen_year)
hydro_df = hydro.generate_hydro_emissions()
solartherm_df = solartherm.generate_upstream_solarthermal(eia_gen_year)
netl_gen = concat_map_upstream_databases(eia_gen_year,
geo_df, solar_df, wind_df, solartherm_df,
)
netl_gen["DataCollection"] = 5
netl_gen["GeographicalCorrelation"] = 1
netl_gen["TechnologicalCorrelation"] = 1
netl_gen["ReliabilityScore"] = 1
    netl_gen = pd.concat(
        [netl_gen, hydro_df[netl_gen.columns]], ignore_index=True, sort=False
    )
# --------------
# Importing header files
import numpy as np
import pandas as pd
from scipy.stats import mode
import warnings
warnings.filterwarnings('ignore')
#Reading file
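# `path` is assumed to be supplied by the execution environment (e.g. the
# exercise platform); it is not defined in this snippet.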
bank_data = pd.read_csv(path)
from flask import request, url_for
from flask_api import FlaskAPI, status, exceptions
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from surprise import NMF
from surprise import KNNWithMeans
from surprise import accuracy
from surprise.model_selection import KFold
from surprise import SVD, SVDpp
from surprise import Dataset
from surprise.model_selection import cross_validate, train_test_split
from surprise import Reader
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
from surprise import KNNBasic
import matplotlib.pyplot as plt
from sklearn.neighbors import NearestNeighbors
import os
import json
from flask import Flask
from flask_cors import CORS
app = FlaskAPI(__name__)
CORS(app)
notes = {
0: 'do the shopping',
1: 'build the codez',
2: 'paint the door',
}
algo = SVD()
def note_repr(key):
return {
'url': request.host_url.rstrip('/') + url_for('notes_detail', key=key),
'text': notes[key]
}
@app.route("/", methods=['GET', 'POST'])
def notes_list():
"""
List or create notes.
"""
if request.method == 'POST':
note = str(request.data.get('text', ''))
idx = max(notes.keys()) + 1
notes[idx] = note
return note_repr(idx), status.HTTP_201_CREATED
# request.method == 'GET'
return [note_repr(idx) for idx in sorted(notes.keys())]
@app.route("/<int:key>/", methods=['GET', 'PUT', 'DELETE'])
def notes_detail(key):
"""
Retrieve, update or delete note instances.
"""
if request.method == 'PUT':
note = str(request.data.get('text', ''))
notes[key] = note
return note_repr(key)
elif request.method == 'DELETE':
notes.pop(key, None)
return '', status.HTTP_204_NO_CONTENT
# request.method == 'GET'
if key not in notes:
raise exceptions.NotFound()
return note_repr(key)
@app.route("/getFutureSubjects/", methods=['GET', 'POST'])
def getFutureSubjects():
data = pd.read_csv('hack.csv')
l_data = request.json
for l in l_data:
learner_id = l['learnerID']
learner_data = data[data["LearnerID"] == learner_id].head(1)
learner_schoolid = learner_data['Schoolid'].values[0]
learner_curr_year = learner_data['CurretYearName'].values[0]
data['year'] = data['MasterYearName'].str.slice(5)
data['year'] = pd.to_numeric(data['year'], errors='coerce').fillna(0).astype(np.int64)
data['current_year'] = data['CurretYearName'].str.slice(5)
data['current_year'] = pd.to_numeric(data['current_year'], errors='coerce').fillna(0).astype(np.int64)
current_yr = data[data["LearnerID"] == learner_id].sort_values(['year'], ascending=[0]).iloc[[0],:]["year"].values[0] + 1
past_data = data[(data['year'] <= current_yr) & (data['year'] != 0) & (data['current_year'] > current_yr) & (data['LearnerID'] != learner_id)]
df = past_data.pivot_table(index = ['LearnerID'], values = 'Points.1', columns = 'MasterSubjectName').fillna(0).reset_index()
learner_data = data[data["LearnerID"] == learner_id]
learner_pivot = learner_data.pivot_table(index = ['LearnerID'], values = 'Points.1', columns = 'MasterSubjectName').fillna(0).reset_index()
for l in l_data:
        # apply the subject/mark overrides supplied in the request payload
print(l['subject'])
print(l['marks'])
learner_pivot[l['subject']] = l['marks']
final_pivot = pd.concat([learner_pivot, df], ignore_index=False, sort=True).fillna(0)
final_pivot[final_pivot["LearnerID"] == learner_id]
model_knn = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=5, n_jobs=-1).fit(df)
values, indexes = model_knn.kneighbors(final_pivot[final_pivot["LearnerID"] == learner_id].values.reshape(1,-1))
similar_leaners = dict(zip(indexes[0], values[0]))
final_learner_pivot = final_pivot[final_pivot["LearnerID"] == learner_id]
sorted_leaners = [(k, similar_leaners[k]) for k in sorted(similar_leaners, key=similar_leaners.get, reverse=True)]
index_arr = []
values_arr = []
for l in sorted_leaners:
index_arr.append(l[0])
values_arr.append(l[1])
similar_learners_df = df.loc[index_arr, :]
similar_learners_df['similarity'] = values_arr
final_learner_pivot['similarity'] = 1
similar_learners_without_self = similar_learners_df
similar_learners_df = pd.concat([similar_learners_df,final_learner_pivot], ignore_index=False, sort=False).fillna(0)
learners = data[(data['year'] == (current_yr + 2)) & (data['LearnerID'].isin(similar_learners_without_self['LearnerID'].values))]
#similar_learners_df
subjects = learners.MasterSubjectName.unique()
learners = learners[['LearnerID', 'Points.1', 'SchoolSubjectName']].rename(columns={"Points.1": "Marks"})
arr = []
# arr.append(learners.to_json(orient='records'))
# grid_obj = {
# "grid":str(learners.to_json(orient='records'))
# }
# arr.append(grid_obj)
#return similar_learners_df.to_json(orient='records')
for s in subjects:
id = str(learner_id) + '-' + str(learner_schoolid)
subject = learner_curr_year + '-' + s
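        # algo.predict returns a surprise Prediction namedtuple; index 3 is the
        # estimated rating (est), scaled by 10 here to produce a mark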
obj = {
"learnerID": learner_id,
"subject": subject,
"marks": (algo.predict(id, subject)[3] * 10)
}
arr.append(obj)
return json.dumps(arr)
@app.route("/getSubjects/<int:learner_id>/", methods=['GET', 'PUT', 'DELETE'])
def getSubjects(learner_id):
data = pd.read_csv('hack.csv')
learner_data = data[data["LearnerID"] == learner_id].head(1)
learner_schoolid = learner_data['Schoolid'].values[0]
learner_curr_year = learner_data['CurretYearName'].values[0]
data['year'] = data['MasterYearName'].str.slice(5)
data['year'] = pd.to_numeric(data['year'], errors='coerce').fillna(0).astype(np.int64)
data['current_year'] = data['CurretYearName'].str.slice(5)
data['current_year'] = pd.to_numeric(data['current_year'], errors='coerce').fillna(0).astype(np.int64)
current_yr = data[data["LearnerID"] == learner_id].sort_values(['year'], ascending=[0]).iloc[[0],:]["year"].values[0]
past_data = data[(data['year'] <= current_yr) & (data['year'] != 0) & (data['current_year'] > current_yr) & (data['LearnerID'] != learner_id)]
df = past_data.pivot_table(index = ['LearnerID'], values = 'Points.1', columns = 'MasterSubjectName').fillna(0).reset_index()
# recently added
learner_data = data[data["LearnerID"] == learner_id]
# recently added done
learner_pivot = learner_data.pivot_table(index = ['LearnerID'], values = 'Points.1', columns = 'MasterSubjectName').fillna(0).reset_index()
final_pivot = pd.concat([learner_pivot, df], ignore_index=False, sort=True).fillna(0)
final_pivot[final_pivot["LearnerID"] == learner_id]
model_knn = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=5, n_jobs=-1)
model_knn.fit(df)
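    # kneighbors returns (distances, indices): the cosine distances to, and the
    # row positions of, the n_neighbors most similar learners in df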
values, indexes = model_knn.kneighbors(final_pivot[final_pivot["LearnerID"] == learner_id].values.reshape(1,-1))
similar_leaners = dict(zip(indexes[0], values[0]))
final_learner_pivot = final_pivot[final_pivot["LearnerID"] == learner_id]
sorted_leaners = [(k, similar_leaners[k]) for k in sorted(similar_leaners, key=similar_leaners.get, reverse=True)]
index_arr = []
values_arr = []
for l in sorted_leaners:
index_arr.append(l[0])
values_arr.append(l[1])
similar_learners_df = df.loc[index_arr, :]
similar_learners_df['similarity'] = values_arr
final_learner_pivot['similarity'] = 1
similar_learners_without_self = similar_learners_df
similar_learners_df = pd.concat([similar_learners_df,final_learner_pivot], ignore_index=False, sort=False).fillna(0)
learners = data[(data['year'] == (current_yr + 1)) & (data['LearnerID'].isin(similar_learners_without_self['LearnerID'].values))]
#similar_learners_df
subjects = learners.MasterSubjectName.unique()
learners = learners[['LearnerID', 'Points.1', 'SchoolSubjectName']].rename(columns={"Points.1": "Marks"})
arr = []
# arr.append(learners.to_json(orient='records'))
# grid_obj = {
# "grid":str(learners.to_json(orient='records'))
# }
# arr.append(grid_obj)
#return similar_learners_df.to_json(orient='records')
for s in subjects:
id = str(learner_id) + '-' + str(learner_schoolid)
subject = learner_curr_year + '-' + s
obj = {
"learnerID": learner_id,
"subject": subject,
"marks": (algo.predict(id, subject)[3] * 10)
}
arr.append(obj)
return json.dumps(arr)
@app.route("/getLearners/<int:learner_id>/", methods=['GET', 'PUT', 'DELETE'])
def getLearners(learner_id):
data = pd.read_csv('hack.csv')
learner_data = data[data["LearnerID"] == learner_id].head(1)
learner_schoolid = learner_data['Schoolid'].values[0]
learner_curr_year = learner_data['CurretYearName'].values[0]
data['year'] = data['MasterYearName'].str.slice(5)
data['year'] = pd.to_numeric(data['year'], errors='coerce').fillna(0).astype(np.int64)
data['current_year'] = data['CurretYearName'].str.slice(5)
data['current_year'] = pd.to_numeric(data['current_year'], errors='coerce').fillna(0).astype(np.int64)
current_yr = data[data["LearnerID"] == learner_id].sort_values(['year'], ascending=[0]).iloc[[0],:]["year"].values[0]
past_data = data[(data['year'] <= current_yr) & (data['year'] != 0) & (data['current_year'] > current_yr) & (data['LearnerID'] != learner_id)]
df = past_data.pivot_table(index = ['LearnerID'], values = 'Points.1', columns = 'MasterSubjectName').fillna(0).reset_index()
learner_pivot = learner_data.pivot_table(index = ['LearnerID'], values = 'Points.1', columns = 'MasterSubjectName').fillna(0).reset_index()
final_pivot = pd.concat([learner_pivot, df], ignore_index=False, sort=True).fillna(0)
final_pivot[final_pivot["LearnerID"] == learner_id]
model_knn = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=5, n_jobs=-1)
model_knn.fit(df)
values, indexes = model_knn.kneighbors(final_pivot[final_pivot["LearnerID"] == learner_id].values.reshape(1,-1))
similar_leaners = dict(zip(indexes[0], values[0]))
final_learner_pivot = final_pivot[final_pivot["LearnerID"] == learner_id]
sorted_leaners = [(k, similar_leaners[k]) for k in sorted(similar_leaners, key=similar_leaners.get, reverse=True)]
index_arr = []
values_arr = []
for l in sorted_leaners:
index_arr.append(l[0])
values_arr.append(l[1])
similar_learners_df = df.loc[index_arr, :]
similar_learners_df['similarity'] = values_arr
final_learner_pivot['similarity'] = 1
similar_learners_without_self = similar_learners_df
similar_learners_df = pd.concat([similar_learners_df,final_learner_pivot], ignore_index=False, sort=False).fillna(0)
learners = data[(data['year'] == (current_yr + 1)) & (data['LearnerID'].isin(similar_learners_without_self['LearnerID'].values))]
#similar_learners_df
subjects = learners.MasterSubjectName.unique()
#learners = learners[['LearnerID', 'Points.1', 'SchoolSubjectName']]
learners = learners[['LearnerID', 'Points.1', 'SchoolSubjectName']].rename(columns={"Points.1": "Marks"})
arr = []
arr.append(learners.to_json(orient='records'))
# grid_obj = {
# "grid":str(learners.to_json(orient='records'))
# }
# arr = []
# grid_obj = {
# "grid":learners.pivot_table(index = ['LearnerID'], values = 'Points.1', columns = 'MasterSubjectName').fillna(0).reset_index().to_json(orient='records')
# for s in learners:
# obj ={
# "learnerID": learner_id,
# "subject": s,
# "marks":s
# }
# arr.append(grid_obj)
# obj = {
# "LearnerID": learner_id
# }
#arr.append(obj)
#return similar_learners_df.to_json(orient='records')
# for s in subjects:
# id = str(learner_id) + '-' + str(learner_schoolid)
# subject = learner_curr_year + '-' + s
# obj = {
# "learnerID": learner_id,
# "subject": subject,
# "marks": algo.predict(id, subject)
# }
# arr.append(obj)
# return json.dumps(arr)
return learners.to_json(orient='records')
# return json.dumps(learners)
# return "".join(map(str, subjects))
@app.route("/getLearnersNew/<int:learner_id>/", methods=['GET', 'PUT', 'DELETE'])
def getLearnersNew(learner_id):
return getSubjects(learner_id)
#getSubjects(233381)
@app.route("/getLearners1/<int:learner_id>/", methods=['GET', 'PUT', 'DELETE'])
def getLearners1(learner_id):
#learner_id = 232301
    data_df = pd.read_csv('hack.csv')
# -*- coding: utf-8 -*-
"""
AIDeveloper
---------
@author: maikherbig
"""
import os,sys,gc
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'#suppress warnings/info from tensorflow
if not sys.platform.startswith("win"):
from multiprocessing import freeze_support
freeze_support()
# Make sure to get the right icon file on win,linux and mac
if sys.platform=="darwin":
icon_suff = ".icns"
else:
icon_suff = ".ico"
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtWidgets, QtGui
from pyqtgraph import Qt
import aid_start
dir_root = os.path.dirname(aid_start.__file__)#ask the module for its origin
dir_settings = os.path.join(dir_root,"aid_settings.json")#dir to settings
Default_dict = aid_start.get_default_dict(dir_settings)
#try:
# splashapp = QtWidgets.QApplication(sys.argv)
# #splashapp.setWindowIcon(QtGui.QIcon("."+os.sep+"art"+os.sep+Default_dict["Icon theme"]+os.sep+"main_icon_simple_04_256.ico"))
# # Create and display the splash screen
# splash_pix = os.path.join(dir_root,"art",Default_dict["Icon theme"],"main_icon_simple_04_256"+icon_suff)
# splash_pix = QtGui.QPixmap(splash_pix)
# #splash_pix = QtGui.QPixmap("."+os.sep+"art"+os.sep+Default_dict["Icon theme"]+os.sep+"main_icon_simple_04_256"+icon_suff)
# splash = QtWidgets.QSplashScreen(splash_pix, QtCore.Qt.WindowStaysOnTopHint)
# splash.setMask(splash_pix.mask())
# splash.show()
#except:
# pass
#BEFORE importing tensorflow or anything from keras: make sure the keras.json has
#certain properties
keras_json_path = os.path.expanduser('~')+os.sep+'.keras'+os.sep+'keras.json'
if not os.path.isdir(os.path.expanduser('~')+os.sep+'.keras'):
os.mkdir(os.path.expanduser('~')+os.sep+'.keras')
aid_start.banner() #show a fancy banner in console
aid_start.keras_json_check(keras_json_path)
import traceback,shutil,re,ast,io,platform
import h5py,json,time,copy,urllib,datetime
from stat import S_IREAD,S_IRGRP,S_IROTH,S_IWRITE,S_IWGRP,S_IWOTH
import tensorflow as tf
from tensorboard import program
from tensorboard import default
from tensorflow.python.client import device_lib
devices = device_lib.list_local_devices()
device_types = [devices[i].device_type for i in range(len(devices))]
#Get the number of CPU cores and GPUs
cpu_nr = os.cpu_count()
gpu_nr = device_types.count("GPU")
print("Nr. of GPUs detected: "+str(gpu_nr))
print("Found "+str(len(devices))+" device(s):")
print("------------------------")
for i in range(len(devices)):
print("Device "+str(i)+": "+devices[i].name)
print("Device type: "+devices[i].device_type)
print("Device description: "+devices[i].physical_device_desc)
print("------------------------")
#Split CPU and GPU into two lists of devices
devices_cpu = []
devices_gpu = []
for dev in devices:
if dev.device_type=="CPU":
devices_cpu.append(dev)
elif dev.device_type=="GPU":
devices_gpu.append(dev)
else:
print("Unknown device type:"+str(dev)+"\n")
import numpy as np
rand_state = np.random.RandomState(117) #to get the same random number on diff. PCs
from scipy import ndimage,misc
from sklearn import metrics,preprocessing
import PIL
import dclab
import cv2
import pandas as pd
import openpyxl,xlrd
import psutil
from keras.models import model_from_json,model_from_config,load_model,clone_model
from keras import backend as K
if 'GPU' in device_types:
keras_gpu_avail = K.tensorflow_backend._get_available_gpus()
if len(keras_gpu_avail)>0:
print("Following GPU is used:")
print(keras_gpu_avail)
print("------------------------")
else:
print("TensorFlow detected GPU, but Keras didn't")
print("------------------------")
from keras.preprocessing.image import load_img
from keras.utils import np_utils,multi_gpu_model
from keras.utils.conv_utils import convert_kernel
import keras_metrics #side package for precision, recall etc during training
global keras_metrics
import model_zoo
from keras2onnx import convert_keras
from onnx import save_model as save_onnx
import aid_img, aid_dl, aid_bin
import aid_frontend
from partial_trainability import partial_trainability
import aid_imports
VERSION = "0.2.3" #Python 3.5.6 Version
model_zoo_version = model_zoo.__version__()
print("AIDeveloper Version: "+VERSION)
print("model_zoo.py Version: "+model_zoo.__version__())
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtWidgets.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtWidgets.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtWidgets.QApplication.translate(context, text, disambig)
tooltips = aid_start.get_tooltips()
class MyPopup(QtWidgets.QWidget):
def __init__(self):
QtWidgets.QWidget.__init__(self)
class WorkerSignals(QtCore.QObject):
'''
Code inspired from here: https://www.learnpyqt.com/courses/concurrent-execution/multithreading-pyqt-applications-qthreadpool/
Defines the signals available from a running worker thread.
Supported signals are:
finished
No data
error
`tuple` (exctype, value, traceback.format_exc() )
result
`object` data returned from processing, anything
progress
`int` indicating % progress
history
`dict` containing keras model history.history resulting from .fit
'''
finished = QtCore.pyqtSignal()
error = QtCore.pyqtSignal(tuple)
result = QtCore.pyqtSignal(object)
progress = QtCore.pyqtSignal(int)
history = QtCore.pyqtSignal(dict)
class Worker(QtCore.QRunnable):
'''
Code inspired/copied from: https://www.learnpyqt.com/courses/concurrent-execution/multithreading-pyqt-applications-qthreadpool/
Worker thread
Inherits from QRunnable to handler worker thread setup, signals and wrap-up.
:param callback: The function callback to run on this worker thread. Supplied args and
kwargs will be passed through to the runner.
:type callback: function
:param args: Arguments to pass to the callback function
:param kwargs: Keywords to pass to the callback function
'''
def __init__(self, fn, *args, **kwargs):
super(Worker, self).__init__()
# Store constructor arguments (re-used for processing)
self.fn = fn
self.args = args
self.kwargs = kwargs
self.signals = WorkerSignals()
# Add the callback to our kwargs
self.kwargs['progress_callback'] = self.signals.progress
self.kwargs['history_callback'] = self.signals.history
@QtCore.pyqtSlot()
def run(self):
'''
Initialise the runner function with passed args, kwargs.
'''
# Retrieve args/kwargs here; and fire processing using them
try:
result = self.fn(*self.args, **self.kwargs)
except:
traceback.print_exc()
exctype, value = sys.exc_info()[:2]
self.signals.error.emit((exctype, value, traceback.format_exc()))
else:
self.signals.result.emit(result) # Return the result of the processing
finally:
self.signals.finished.emit() # Done
class MainWindow(QtWidgets.QMainWindow):
def __init__(self, *args, **kwargs):
super(MainWindow, self).__init__(*args, **kwargs)
self.setupUi()
def setupUi(self):
aid_frontend.setup_main_ui(self,gpu_nr)
def retranslateUi(self):
aid_frontend.retranslate_main_ui(self,gpu_nr,VERSION)
def dataDropped(self, l):
#If there is data stored on ram tell user that RAM needs to be refreshed!
if len(self.ram)>0:
self.statusbar.showMessage("Newly added data is not yet in RAM. Only RAM data will be used. Use ->'File'->'Data to RAM now' to update RAM",5000)
#l is a list of some filenames (.rtdc) or folders (containing .jpg, jpeg, .png)
#Iterate over l and check if it is a folder or a file (directory)
isfile = [os.path.isfile(str(url)) for url in l]
isfolder = [os.path.isdir(str(url)) for url in l]
#####################For folders with images:##########################
#where are folders?
ind_true = np.where(np.array(isfolder)==True)[0]
foldernames = list(np.array(l)[ind_true]) #select the indices that are valid
#On mac, there is a trailing / in case of folders; remove them
foldernames = [os.path.normpath(url) for url in foldernames]
basename = [os.path.basename(f) for f in foldernames]
#Look quickly inside the folders and ask the user if he wants to convert
#to .rtdc (might take a while!)
if len(foldernames)>0: #User dropped (also) folders (which may contain images)
# filecounts = []
# for i in range(len(foldernames)):
# url = foldernames[i]
# files = os.listdir(url)
# files_full = [os.path.join(url,files[i]) for i in range(len(files))]
# filecounts.append(len([f for f in files_full if os.path.isfile(f)]))
# Text = []
# for b,n in zip(basename,filecounts):
# Text.append(b+": "+str(n)+" images")
# Text = "\n".join(Text)
Text = "Images from single folders are read and saved to individual \
            .rtdc files with the same name as the corresponding folder.<b>If \
            you have RGB images you can either save the full RGB information, \
            or do a conversion to Grayscale (saves some disk space but information \
            about color is lost). RGB is recommended since AID will automatically \
            do the conversion to grayscale later if required.<b>If you have \
            Grayscale images, a conversion to RGB will just copy the info to all \
            channels, which allows you to use RGB-mode and Grayscale-mode later on."
Text = Text+"\nImages from following folders will be converted:\n"+"\n".join(basename)
#Show the user a summary with all the found folders and how many files are
#contained. Ask if he want to convert
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Question)
text = "<html><head/><body><p>Should the images of the chosen folder(s)\
be converted to .rtdc using <b>RGB</b> or <b>Grayscale</b> format? <b>\
(RGB is recommended!)</b> Either option might take some time. You can \
reuse the .rtdc file next time.</p></body></html>"
msg.setText(text)
msg.setDetailedText(Text)
msg.setWindowTitle("Format for conversion to .rtdc (RGB/Grayscale)")
msg.addButton(QtGui.QPushButton('Convert to Grayscale'), QtGui.QMessageBox.YesRole)
msg.addButton(QtGui.QPushButton('Convert to RGB'), QtGui.QMessageBox.NoRole)
msg.addButton(QtGui.QPushButton('Cancel'), QtGui.QMessageBox.RejectRole)
retval = msg.exec_()
#Conversion of images in folders is (almost) independent from what
#is going to be fitted (So I leave the option menu still!)
#In options: Color Mode one can still use RGB mode and export here as
#Grayscale (but this would actually not work since RGB information is lost).
#The other way around works. Therefore it is recommended to export RGB!
if retval==0:
color_mode = "Grayscale"
channels = 1
elif retval==1:
color_mode = "RGB"
channels = 3
else:
return
            self.statusbar.showMessage("Color mode '"+color_mode+"' is used",5000)
url_converted = []
for i in range(len(foldernames)):
url = foldernames[i]
print("Start converting images in\n"+url)
#try:
#get a list of files inside this directory:
images,pos_x,pos_y = [],[],[]
for root, dirs, files in os.walk(url):
for file in files:
try:
path = os.path.join(root, file)
img = load_img(path,color_mode=color_mode.lower()) #This uses PIL and supports many many formats!
images.append(np.array(img)) #append nice numpy array to list
#create pos_x and pos_y
pos_x.append( int(np.round(img.width/2.0,0)) )
pos_y.append( int(np.round(img.height/2.0,0)) )
except:
pass
                #Thanks to andko76 for pointing out that unequal image sizes cause an error:
#https://github.com/maikherbig/AIDeveloper/issues/1
#Check that all images have the same size
# img_shape_errors = 0
# text_error = "Images have unequal dimensions:"
# img_h = [a.shape[0] for a in images]
# img_h_uni = len(np.unique(img_h))
# if img_h_uni!=1:
# text_error += "\n- found unequal heights"
# img_shape_errors=1
# img_w = [a.shape[1] for a in images]
# img_w_uni = len(np.unique(img_w))
# if img_w_uni!=1:
# text_error += "\n- found unequal widths"
# img_shape_errors=1
# img_c = [len(a.shape) for a in images]
# img_c_uni = len(np.unique(img_c))
# if img_c_uni!=1:
# text_error += "\n- found unequal numbers of channels"
# img_shape_errors=1
# #If there were issues detected, show error message
# if img_shape_errors==1:
# msg = QtWidgets.QMessageBox()
# msg.setIcon(QtWidgets.QMessageBox.Warning)
# msg.setText(str(text_error))
# msg.setWindowTitle("Error: Unequal image shapes")
# msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
# msg.exec_()
# return
                #Get a list of occurring image dimensions (width and height)
img_shape = [a.shape[0] for a in images] + [a.shape[1] for a in images]
dims = np.unique(img_shape)
                #Get a list of occurrences of image shapes
img_shape = [str(a.shape[0])+" x "+str(a.shape[1]) for a in images]
occurences = np.unique(img_shape,return_counts=True)
#inform user if there is more than one img shape
if len(occurences[0])>1 or len(dims)>1:
text_detail = "Path: "+url
text_detail += "\nFollowing image shapes are present"
for i in range(len(occurences[0])):
text_detail+="\n- "+str(occurences[1][i])+" times: "+str(occurences[0][i])
self.popup_imgRes = QtGui.QDialog()
self.popup_imgRes_ui = aid_frontend.popup_imageLoadResize()
self.popup_imgRes_ui.setupUi(self.popup_imgRes) #open a popup to show options for image resizing (make image equally sized)
#self.popup_imgRes.setWindowModality(QtCore.Qt.WindowModal)
self.popup_imgRes.setWindowModality(QtCore.Qt.ApplicationModal)
#Insert information into textBrowser
self.popup_imgRes_ui.textBrowser_imgResize_occurences.setText(text_detail)
Image_import_dimension = Default_dict["Image_import_dimension"]
self.popup_imgRes_ui.spinBox_ingResize_h_1.setValue(Image_import_dimension)
self.popup_imgRes_ui.spinBox_ingResize_h_2.setValue(Image_import_dimension)
self.popup_imgRes_ui.spinBox_ingResize_w_1.setValue(Image_import_dimension)
self.popup_imgRes_ui.spinBox_ingResize_w_2.setValue(Image_import_dimension)
Image_import_interpol_method = Default_dict["Image_import_interpol_method"]
index = self.popup_imgRes_ui.comboBox_resizeMethod.findText(Image_import_interpol_method, QtCore.Qt.MatchFixedString)
if index >= 0:
self.popup_imgRes_ui.comboBox_resizeMethod.setCurrentIndex(index)
#Define function for the OK button:
def popup_imgRes_ok(images,channels,pos_x,pos_y):
print("Start resizing operation")
#Get info from GUI
final_h = int(self.popup_imgRes_ui.spinBox_ingResize_h_1.value())
print("Height:"+str(final_h))
final_w = int(self.popup_imgRes_ui.spinBox_ingResize_w_1.value())
print("Width:"+str(final_w))
Default_dict["Image_import_dimension"] = final_h
pix = 1
if self.popup_imgRes_ui.radioButton_imgResize_cropPad.isChecked():#cropping and padding method
images = aid_img.image_crop_pad_cv2(images,pos_x,pos_y,pix,final_h,final_w,padding_mode="cv2.BORDER_CONSTANT")
elif self.popup_imgRes_ui.radioButton_imgResize_interpolate.isChecked():
interpolation_method = str(self.popup_imgRes_ui.comboBox_resizeMethod.currentText())
Default_dict["Image_import_interpol_method"] = interpolation_method
images = aid_img.image_resize_scale(images,pos_x,pos_y,final_h,final_w,channels,interpolation_method,verbose=False)
else:
print("Invalid image resize method!")
#Save the Default_dict
aid_bin.save_aid_settings(Default_dict)
self.popup_imgRes.accept()
return images
#Define function for the Cancel button:
def popup_imgRes_cancel():
self.popup_imgRes.close()
return
self.popup_imgRes_ui.pushButton_imgResize_ok.clicked.connect(lambda: popup_imgRes_ok(images,channels,pos_x,pos_y))
self.popup_imgRes_ui.pushButton_imgResize_cancel.clicked.connect(popup_imgRes_cancel)
retval = self.popup_imgRes.exec_()
#retval is 0 if the user clicked cancel or just closed the window; in this case just exist the function
if retval==0:
return
#get new pos_x, pos_y (after cropping, the pixel value for the middle of the image is different!)
pos_x = [int(np.round(img.shape[1]/2.0,0)) for img in images]
pos_y = [int(np.round(img.shape[0]/2.0,0)) for img in images]
#Now, all images are of identical shape and can be converted to a numpy array
images = np.array((images), dtype="uint8")
pos_x = np.array((pos_x), dtype="uint8")
pos_y = np.array((pos_y), dtype="uint8")
#Save as foldername.rtdc
fname = url+".rtdc"
if os.path.isfile(fname):
#ask user if file can be overwritten
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Question)
text = "<html><head/><body><p>File:"+fname+" already exists. Should it be overwritten?</p></body></html>"
msg.setText(text)
msg.setWindowTitle("Overwrite file?")
msg.addButton(QtGui.QPushButton('Yes'), QtGui.QMessageBox.YesRole)
msg.addButton(QtGui.QPushButton('No'), QtGui.QMessageBox.NoRole)
msg.addButton(QtGui.QPushButton('Cancel'), QtGui.QMessageBox.RejectRole)
retval = msg.exec_()
if retval==0:
try:
os.remove(fname)
aid_img.imgs_2_rtdc(fname,images,pos_x,pos_y)
url_converted.append(fname)
except Exception as e:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(e))
msg.setWindowTitle("Error")
retval = msg.exec_()
elif retval==1:
pass
else:
pass
else:#file does not yet exist. Create it
aid_img.imgs_2_rtdc(fname,images,pos_x,pos_y)
url_converted.append(fname)
print("Finished converting! Final dimension of image tensor is:"+str(images.shape))
#Now load the created files directly to drag/drop-region!
self.dataDropped(url_converted)
#####################For .rtdc files:##################################
#where are files?
ind_true = np.where(np.array(isfile)==True)[0]
filenames = list(np.array(l)[ind_true]) #select the indices that are valid
#check if the file can be opened and get some information
fileinfo = []
for i in range(len(filenames)):
rtdc_path = filenames[i]
try:
failed,rtdc_ds = aid_bin.load_rtdc(rtdc_path)
if failed:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(rtdc_ds))
msg.setWindowTitle("Error occurred during loading file")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
features = list(rtdc_ds["events"].keys())
#Make sure that there is "images", "pos_x" and "pos_y" available
if "image" in features and "pos_x" in features and "pos_y" in features:
nr_images = rtdc_ds["events"]["image"].len()
pix = rtdc_ds.attrs["imaging:pixel size"]
xtra_in_available = len(rtdc_ds.keys())>2 #Is True, only if there are more than 2 elements.
fileinfo.append({"rtdc_ds":rtdc_ds,"rtdc_path":rtdc_path,"features":features,"nr_images":nr_images,"pix":pix,"xtra_in":xtra_in_available})
else:
missing = []
for feat in ["image","pos_x","pos_y"]:
if feat not in features:
missing.append(feat)
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Essential feature(s) are missing in data-set")
msg.setDetailedText("Data-set: "+rtdc_path+"\nis missing "+str(missing))
msg.setWindowTitle("Missing essential features")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
except Exception as e:
print(e)
#Add the stuff to the combobox on Plot/Peak Tab
url_list = [fileinfo[iterator]["rtdc_path"] for iterator in range(len(fileinfo))]
self.comboBox_chooseRtdcFile.addItems(url_list)
self.comboBox_selectData.addItems(url_list)
        if len(url_list)==0: #This fixes the issue that the prog. crashes if accidentally a tableitem is dragged and "dropped" on the table
return
width=self.comboBox_selectData.fontMetrics().boundingRect(max(url_list, key=len)).width()
self.comboBox_selectData.view().setFixedWidth(width+10)
for rowNumber in range(len(fileinfo)):#for url in l:
url = fileinfo[rowNumber]["rtdc_path"]
#add to table
rowPosition = self.table_dragdrop.rowCount()
self.table_dragdrop.insertRow(rowPosition)
columnPosition = 0
line = QtWidgets.QLabel(self.table_dragdrop)
line.setText(url)
line.setDisabled(True)
line.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
self.table_dragdrop.setCellWidget(rowPosition, columnPosition, line)
# item = QtWidgets.QTableWidgetItem(url)
# item.setFlags( QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable )
# print(item.textAlignment())
# item.setTextAlignment(QtCore.Qt.AlignRight) # change the alignment
# #item.setTextAlignment(QtCore.Qt.AlignLeading | QtCore.Qt.AnchorRight) # change the alignment
# self.table_dragdrop.setItem(rowPosition , columnPosition, item ) #
columnPosition = 1
spinb = QtWidgets.QSpinBox(self.table_dragdrop)
spinb.valueChanged.connect(self.dataOverviewOn)
self.table_dragdrop.setCellWidget(rowPosition, columnPosition, spinb)
for columnPosition in range(2,4):
#for each item, also create 2 checkboxes (train/valid)
item = QtWidgets.QTableWidgetItem()#("item {0} {1}".format(rowNumber, columnNumber))
item.setFlags( QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsEnabled )
item.setCheckState(QtCore.Qt.Unchecked)
self.table_dragdrop.setItem(rowPosition, columnPosition, item)
columnPosition = 4
#Place a button which allows to show a plot (scatter, histo...lets see)
btn = QtWidgets.QPushButton(self.table_dragdrop)
btn.setMinimumSize(QtCore.QSize(50, 30))
btn.setMaximumSize(QtCore.QSize(50, 30))
btn.clicked.connect(self.button_hist)
btn.setText('Plot')
self.table_dragdrop.setCellWidget(rowPosition, columnPosition, btn)
self.table_dragdrop.resizeRowsToContents()
# columnPosition = 5
# #Place a combobox with the available features
# cb = QtWidgets.QComboBox(self.table_dragdrop)
# cb.addItems(fileinfo[rowNumber]["features"])
# cb.setMinimumSize(QtCore.QSize(70, 30))
# cb.setMaximumSize(QtCore.QSize(70, 30))
# width=cb.fontMetrics().boundingRect(max(fileinfo[rowNumber]["features"], key=len)).width()
# cb.view().setFixedWidth(width+30)
# self.table_dragdrop.setCellWidget(rowPosition, columnPosition, cb)
columnPosition = 5
#Place a combobox with the available features
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.DisplayRole, fileinfo[rowNumber]["nr_images"])
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
self.table_dragdrop.setItem(rowPosition, columnPosition, item)
columnPosition = 6
#Field to user-define nr. of cells/epoch
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole,100)
#item.cellChanged.connect(self.dataOverviewOn)
self.table_dragdrop.setItem(rowPosition, columnPosition, item)
columnPosition = 7
#Pixel size
item = QtWidgets.QTableWidgetItem()
pix = float(fileinfo[rowNumber]["pix"])
#print(pix)
item.setData(QtCore.Qt.EditRole,pix)
self.table_dragdrop.setItem(rowPosition, columnPosition, item)
columnPosition = 8
#Should data be shuffled (random?)
item = QtWidgets.QTableWidgetItem()#("item {0} {1}".format(rowNumber, columnNumber))
item.setFlags( QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsEnabled )
item.setCheckState(QtCore.Qt.Checked)
self.table_dragdrop.setItem(rowPosition, columnPosition, item)
columnPosition = 9
#Zooming factor
item = QtWidgets.QTableWidgetItem()
zoom = 1.0
item.setData(QtCore.Qt.EditRole,zoom)
self.table_dragdrop.setItem(rowPosition, columnPosition, item)
columnPosition = 10
#Should xtra_data be used?
item = QtWidgets.QTableWidgetItem()#("item {0} {1}".format(rowNumber, columnNumber))
xtra_in_available = fileinfo[rowNumber]["xtra_in"]
if xtra_in_available:
item.setFlags( QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsEnabled )
else:
item.setFlags( QtCore.Qt.ItemIsUserCheckable )
item.setCheckState(QtCore.Qt.Unchecked)
self.table_dragdrop.setItem(rowPosition, columnPosition, item)
#Functions for Keras augmentation checkboxes
def keras_changed_rotation(self,on_or_off):
if on_or_off==0:
self.lineEdit_Rotation.setText(str(0))
self.lineEdit_Rotation.setEnabled(False)
elif on_or_off==2:
self.lineEdit_Rotation.setText(str(Default_dict ["rotation"]))
self.lineEdit_Rotation.setEnabled(True)
else:
return
def keras_changed_width_shift(self,on_or_off):
if on_or_off==0:
self.lineEdit_widthShift.setText(str(0))
self.lineEdit_widthShift.setEnabled(False)
elif on_or_off==2:
self.lineEdit_widthShift.setText(str(Default_dict ["width_shift"]))
self.lineEdit_widthShift.setEnabled(True)
else:
return
def keras_changed_height_shift(self,on_or_off):
if on_or_off==0:
self.lineEdit_heightShift.setText(str(0))
self.lineEdit_heightShift.setEnabled(False)
elif on_or_off==2:
self.lineEdit_heightShift.setText(str(Default_dict ["height_shift"]))
self.lineEdit_heightShift.setEnabled(True)
else:
return
def keras_changed_zoom(self,on_or_off):
if on_or_off==0:
self.lineEdit_zoomRange.setText(str(0))
self.lineEdit_zoomRange.setEnabled(False)
elif on_or_off==2:
self.lineEdit_zoomRange.setText(str(Default_dict ["zoom"]))
self.lineEdit_zoomRange.setEnabled(True)
else:
return
def keras_changed_shear(self,on_or_off):
if on_or_off==0:
self.lineEdit_shearRange.setText(str(0))
self.lineEdit_shearRange.setEnabled(False)
elif on_or_off==2:
self.lineEdit_shearRange.setText(str(Default_dict ["shear"]))
self.lineEdit_shearRange.setEnabled(True)
else:
return
def keras_changed_brightplus(self,on_or_off):
if on_or_off==0:
self.spinBox_PlusLower.setValue(0)
self.spinBox_PlusLower.setEnabled(False)
self.spinBox_PlusUpper.setValue(0)
self.spinBox_PlusUpper.setEnabled(False)
elif on_or_off==2:
self.spinBox_PlusLower.setValue(Default_dict ["Brightness add. lower"])
self.spinBox_PlusLower.setEnabled(True)
self.spinBox_PlusUpper.setValue(Default_dict ["Brightness add. upper"])
self.spinBox_PlusUpper.setEnabled(True)
else:
return
def keras_changed_brightmult(self,on_or_off):
if on_or_off==0:
self.doubleSpinBox_MultLower.setValue(1.0)
self.doubleSpinBox_MultLower.setEnabled(False)
self.doubleSpinBox_MultUpper.setValue(1.0)
self.doubleSpinBox_MultUpper.setEnabled(False)
elif on_or_off==2:
self.doubleSpinBox_MultLower.setValue(Default_dict ["Brightness mult. lower"])
self.doubleSpinBox_MultLower.setEnabled(True)
self.doubleSpinBox_MultUpper.setValue(Default_dict ["Brightness mult. upper"])
self.doubleSpinBox_MultUpper.setEnabled(True)
else:
return
def keras_changed_noiseMean(self,on_or_off):
if on_or_off==0:
self.doubleSpinBox_GaussianNoiseMean.setValue(0.0)
self.doubleSpinBox_GaussianNoiseMean.setEnabled(False)
elif on_or_off==2:
self.doubleSpinBox_GaussianNoiseMean.setValue(Default_dict ["Gaussnoise Mean"])
self.doubleSpinBox_GaussianNoiseMean.setEnabled(True)
else:
return
def keras_changed_noiseScale(self,on_or_off):
if on_or_off==0:
self.doubleSpinBox_GaussianNoiseScale.setValue(0.0)
self.doubleSpinBox_GaussianNoiseScale.setEnabled(False)
elif on_or_off==2:
self.doubleSpinBox_GaussianNoiseScale.setValue(Default_dict ["Gaussnoise Scale"])
self.doubleSpinBox_GaussianNoiseScale.setEnabled(True)
else:
return
def keras_changed_contrast(self,on_or_off):
if on_or_off==0:
self.doubleSpinBox_contrastLower.setEnabled(False)
self.doubleSpinBox_contrastHigher.setEnabled(False)
elif on_or_off==2:
self.doubleSpinBox_contrastLower.setEnabled(True)
self.doubleSpinBox_contrastHigher.setEnabled(True)
else:
return
def keras_changed_saturation(self,on_or_off):
if on_or_off==0:
self.doubleSpinBox_saturationLower.setEnabled(False)
self.doubleSpinBox_saturationHigher.setEnabled(False)
elif on_or_off==2:
self.doubleSpinBox_saturationLower.setEnabled(True)
self.doubleSpinBox_saturationHigher.setEnabled(True)
else:
return
def keras_changed_hue(self,on_or_off):
if on_or_off==0:
self.doubleSpinBox_hueDelta.setEnabled(False)
elif on_or_off==2:
self.doubleSpinBox_hueDelta.setEnabled(True)
else:
return
def expert_mode_off(self,on_or_off):
"""
        Reset all values on the expert tab to the default values, excluding the metrics;
        metrics are defined only once when fitting starts and should not be changed afterwards.
"""
if on_or_off==0: #switch off
self.spinBox_batchSize.setValue(Default_dict["spinBox_batchSize"])
self.spinBox_epochs.setValue(1)
self.checkBox_expt_loss.setChecked(False)
self.expert_loss_off(0)
self.groupBox_learningRate.setChecked(False)
self.expert_learningrate_off(0)
self.checkBox_optimizer.setChecked(False)
self.expert_optimizer_off(0)
def expert_loss_off(self,on_or_off):
if on_or_off==0: #switch off
#switch back to categorical_crossentropy
index = self.comboBox_expt_loss.findText("categorical_crossentropy", QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_expt_loss.setCurrentIndex(index)
def expert_learningrate_off(self,on_or_off):
if on_or_off==0: #switch off
#which optimizer is used? (there are different default learning-rates
#for each optimizer!)
optimizer = str(self.comboBox_optimizer.currentText())
self.doubleSpinBox_learningRate.setValue(Default_dict["doubleSpinBox_learningRate_"+optimizer])
self.radioButton_LrCycl.setChecked(False)
self.radioButton_LrExpo.setChecked(False)
self.radioButton_LrConst.setChecked(True)
def expert_optimizer_off(self,on_or_off):
if on_or_off==0: #switch off, set back to categorical_crossentropy
optimizer = "Adam"
index = self.comboBox_optimizer.findText(optimizer, QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_optimizer.setCurrentIndex(index)
#also reset the learning rate to the default
self.doubleSpinBox_learningRate.setValue(Default_dict["doubleSpinBox_learningRate_"+optimizer])
def expert_optimizer_changed(self,optimizer_text,listindex):
# print("optimizer_text: "+str(optimizer_text))
# print("listindex: "+str(listindex))
if optimizer_text=="":
return
if listindex==-1:
item_ui = self
else:
item_ui = self.fittingpopups_ui[listindex]
#set the learning rate to the default for this optimizer
value_current = float(item_ui.doubleSpinBox_learningRate.value())
value_wanted = Default_dict["doubleSpinBox_learningRate_"+optimizer_text]
#insert the current value in the optimizer_settings:
item_ui.optimizer_settings["doubleSpinBox_lr_"+optimizer_text.lower()] = value_current
item_ui.optimizer_settings["comboBox_optimizer"] = optimizer_text
try: #only works on the fitting-popup
text = str(item_ui.textBrowser_FittingInfo.toPlainText())
except:
text = "Epoch"
# print("text: "+str(text))
if value_current!=value_wanted and "Epoch" in text:#avoid that the message pops up when window is created
item_ui.doubleSpinBox_learningRate.setValue(value_wanted)
item_ui.doubleSpinBox_expDecInitLr.setValue(value_wanted)
#Inform user
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setWindowTitle("Learning rate to default")
msg.setText("Learning rate was set to the default for "+optimizer_text)
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
def expert_lr_changed(self,value,optimizer_text,listindex):
if listindex==-1:
item_ui = self
else:
item_ui = self.fittingpopups_ui[listindex]
item_ui.optimizer_settings["doubleSpinBox_lr_"+optimizer_text.lower()] = value
def update_hist1(self):
feature = str(self.comboBox_feat1.currentText())
feature_values = self.rtdc_ds["events"][feature]
#if len(feature_values)==len(self.rtdc_ds['area_cvx']):
# self.histogram = pg.GraphicsWindow()
#plt1 = self.histogram.addPlot()
y,x = np.histogram(feature_values, bins='auto')
self.plt1.plot(x, y, stepMode=True, fillLevel=0, brush=(0,0,255,150),clear=True)
# self.gridLayout_w2.addWidget(self.histogram,1, 0, 1, 1)
# self.w.show()
def update_hist2(self):
feature = str(self.comboBox_feat2.currentText())
feature_values = self.rtdc_ds["events"][feature]
#if len(feature_values)==len(self.rtdc_ds['area_cvx']):
#self.histogram = pg.GraphicsWindow()
#plt1 = self.histogram.addPlot()
y,x = np.histogram(feature_values, bins='auto')
self.plt1.plot(x, y, stepMode=True, fillLevel=0, brush=(0,0,255,150),clear=True)
# self.gridLayout_w2.addWidget(self.histogram,1, 0, 1, 1)
# self.w.show()
def update_scatter(self):
feature_x = str(self.comboBox_feat1.currentText())
feature_x_values = self.rtdc_ds["events"][feature_x]
feature_y = str(self.comboBox_feat2.currentText())
feature_y_values = self.rtdc_ds["events"][feature_y]
if len(feature_x_values)==len(feature_y_values):
#self.histogram = pg.GraphicsWindow()
#plt1 = self.histogram.addPlot()
#y,x = np.histogram(feature_values, bins='auto')
self.plt1.plot(feature_x_values, feature_y_values,pen=None,symbol='o',clear=True)
# self.gridLayout_w2.addWidget(self.histogram,1, 0, 1, 1)
# self.w.show()
def button_hist(self,item):
buttonClicked = self.sender()
index = self.table_dragdrop.indexAt(buttonClicked.pos())
rowPosition = index.row()
rtdc_path = self.table_dragdrop.cellWidget(rowPosition, 0).text()
rtdc_path = str(rtdc_path)
failed,rtdc_ds = aid_bin.load_rtdc(rtdc_path)
if failed:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(rtdc_ds))
msg.setWindowTitle("Error occurred during loading file")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
self.rtdc_ds = rtdc_ds
# feature_values = rtdc_ds[feature]
#Init a popup window
self.w = MyPopup()
self.w.setWindowTitle(rtdc_path)
self.w.setObjectName(_fromUtf8("w"))
self.gridLayout_w2 = QtWidgets.QGridLayout(self.w)
self.gridLayout_w2.setContentsMargins(0, 0, 0, 0)
self.gridLayout_w2.setObjectName(_fromUtf8("gridLayout_w2"))
self.widget = QtWidgets.QWidget(self.w)
self.widget.setMinimumSize(QtCore.QSize(0, 65))
self.widget.setMaximumSize(QtCore.QSize(16777215, 65))
self.widget.setObjectName(_fromUtf8("widget"))
self.horizontalLayout_w3 = QtWidgets.QHBoxLayout(self.widget)
self.horizontalLayout_w3.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_w3.setObjectName(_fromUtf8("horizontalLayout_w3"))
self.verticalLayout_w = QtWidgets.QVBoxLayout()
self.verticalLayout_w.setObjectName(_fromUtf8("verticalLayout_w"))
self.horizontalLayout_w = QtWidgets.QHBoxLayout()
self.horizontalLayout_w.setObjectName(_fromUtf8("horizontalLayout_w"))
self.comboBox_feat1 = QtWidgets.QComboBox(self.widget)
self.comboBox_feat1.setObjectName(_fromUtf8("comboBox_feat1"))
features = list(self.rtdc_ds["events"].keys())
self.comboBox_feat1.addItems(features)
self.horizontalLayout_w.addWidget(self.comboBox_feat1)
self.comboBox_feat2 = QtWidgets.QComboBox(self.widget)
self.comboBox_feat2.setObjectName(_fromUtf8("comboBox_feat2"))
self.comboBox_feat2.addItems(features)
self.horizontalLayout_w.addWidget(self.comboBox_feat2)
self.verticalLayout_w.addLayout(self.horizontalLayout_w)
self.horizontalLayout_w2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_w2.setObjectName(_fromUtf8("horizontalLayout_w2"))
self.pushButton_Hist1 = QtWidgets.QPushButton(self.widget)
self.pushButton_Hist1.setObjectName(_fromUtf8("pushButton_Hist1"))
self.horizontalLayout_w2.addWidget(self.pushButton_Hist1)
self.pushButton_Hist2 = QtWidgets.QPushButton(self.widget)
self.pushButton_Hist2.setObjectName(_fromUtf8("pushButton_Hist2"))
self.horizontalLayout_w2.addWidget(self.pushButton_Hist2)
self.verticalLayout_w.addLayout(self.horizontalLayout_w2)
self.horizontalLayout_w3.addLayout(self.verticalLayout_w)
self.verticalLayout_w2 = QtWidgets.QVBoxLayout()
self.verticalLayout_w2.setObjectName(_fromUtf8("verticalLayout_w2"))
self.pushButton_Scatter = QtWidgets.QPushButton(self.widget)
self.pushButton_Scatter.setObjectName(_fromUtf8("pushButton_Scatter"))
self.verticalLayout_w2.addWidget(self.pushButton_Scatter)
self.checkBox_ScalePix = QtWidgets.QCheckBox(self.widget)
self.checkBox_ScalePix.setObjectName(_fromUtf8("checkBox_ScalePix"))
self.verticalLayout_w2.addWidget(self.checkBox_ScalePix)
self.horizontalLayout_w3.addLayout(self.verticalLayout_w2)
self.gridLayout_w2.addWidget(self.widget, 0, 0, 1, 1)
self.pushButton_Hist1.setText("Hist")
self.pushButton_Hist1.clicked.connect(self.update_hist1)
self.pushButton_Hist2.setText("Hist")
self.pushButton_Hist2.clicked.connect(self.update_hist2)
self.pushButton_Scatter.setText("Scatter")
self.pushButton_Scatter.clicked.connect(self.update_scatter)
self.checkBox_ScalePix.setText("Scale by pix")
self.histogram = pg.GraphicsWindow()
self.plt1 = self.histogram.addPlot()
# y,x = np.histogram(feature_values, bins='auto')
# plt1.plot(x, y, stepMode=True, fillLevel=0, brush=(0,0,255,150))
self.gridLayout_w2.addWidget(self.histogram,1, 0, 1, 1)
self.w.show()
def update_historyplot_pop(self,listindex):
#listindex = self.popupcounter-1 #len(self.fittingpopups_ui)-1
#After the first epoch there are checkboxes available. Check, if user checked some:
colcount = int(self.fittingpopups_ui[listindex].tableWidget_HistoryInfo_pop.columnCount())
#Collect items that are checked
selected_items,Colors = [],[]
for colposition in range(colcount):
#is it checked for train?
cb = self.fittingpopups_ui[listindex].tableWidget_HistoryInfo_pop.item(0, colposition)
if not cb==None:
if cb.checkState() == QtCore.Qt.Checked:
selected_items.append(str(cb.text()))
Colors.append(cb.background())
self.Colors = Colors
Histories = self.fittingpopups_ui[listindex].Histories
        DF1 = [[ h[h_i][-1] for h_i in h] for h in Histories] #if nb_epoch in .fit() is >1, only save the last history item, because this corresponds to a model state that could be saved
DF1 = np.r_[DF1]
DF1 = pd.DataFrame( DF1,columns=Histories[0].keys() )
# if len(DF1)>0:
# DF1 = pd.concat(DF1)
# else:
# return
self.fittingpopups_ui[listindex].widget_pop.clear()
#Create fresh plot
plt1 = self.fittingpopups_ui[listindex].widget_pop.addPlot()
plt1.showGrid(x=True,y=True)
plt1.addLegend()
plt1.setLabel('bottom', 'Epoch', units='')
#Create a dict that stores plots for each metric (for real time plotting)
self.fittingpopups_ui[listindex].historyscatters = dict()
for i in range(len(selected_items)):
key = selected_items[i]
df = DF1[key]
color = self.Colors[i]
pen_rollmedi = list(color.color().getRgb())
pen_rollmedi = pg.mkColor(pen_rollmedi)
pen_rollmedi = pg.mkPen(color=pen_rollmedi,width=6)
color = list(color.color().getRgb())
color[-1] = int(0.6*color[-1])
color = tuple(color)
pencolor = pg.mkColor(color)
brush = pg.mkBrush(color=pencolor)
#print(df)
historyscatter = plt1.plot(range(len(df)), df.values, pen=None,symbol='o',symbolPen=None,symbolBrush=brush,name=key,clear=False)
#self.fittingpopups_ui[listindex].historyscatters.append(historyscatter)
self.fittingpopups_ui[listindex].historyscatters[key]=historyscatter
def stop_fitting_pop(self,listindex):
#listindex = len(self.fittingpopups_ui)-1
epochs = self.fittingpopups_ui[listindex].epoch_counter
        #Stop button on the fitting popup
#Should stop the fitting process and save the metafile
#1. Change the nr. requested epochs to a smaller number
self.fittingpopups_ui[listindex].spinBox_NrEpochs.setValue(epochs-1)
#2. Check the box which will cause that the new parameters are applied at next epoch
self.fittingpopups_ui[listindex].checkBox_ApplyNextEpoch.setChecked(True)
def pause_fitting_pop(self,listindex):
#Just change the text on the button
if str(self.fittingpopups_ui[listindex].pushButton_Pause_pop.text())==" ":
            #If the text on the button was Pause, change it to Continue
self.fittingpopups_ui[listindex].pushButton_Pause_pop.setText("")
self.fittingpopups_ui[listindex].pushButton_Pause_pop.setStyleSheet("background-color: green")
self.fittingpopups_ui[listindex].pushButton_Pause_pop.setIcon(QtGui.QIcon(os.path.join(dir_root,"art",Default_dict["Icon theme"],"continue.png")))
elif str(self.fittingpopups_ui[listindex].pushButton_Pause_pop.text())=="":
            #If the text on the button was Continue, change it to Pause
self.fittingpopups_ui[listindex].pushButton_Pause_pop.setText(" ")
self.fittingpopups_ui[listindex].pushButton_Pause_pop.setIcon(QtGui.QIcon(os.path.join(dir_root,"art",Default_dict["Icon theme"],"pause.png")))
self.fittingpopups_ui[listindex].pushButton_Pause_pop.setStyleSheet("")
def saveTextWindow_pop(self,listindex):
#Get the entire content of textBrowser_FittingInfo
text = str(self.fittingpopups_ui[listindex].textBrowser_FittingInfo.toPlainText())
#Ask the user where to save the stuff
filename = QtWidgets.QFileDialog.getSaveFileName(self, 'Fitting info', Default_dict["Path of last model"]," (*.txt)")
filename = filename[0]
#Save to this filename
if len(filename)>0:
f = open(filename,'w')
f.write(text)
f.close()
def clearTextWindow_pop(self,listindex):
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.clear()
def showModelSumm_pop(self,listindex):
text5 = "Model summary:\n"
summary = []
self.model_keras.summary(print_fn=summary.append)
summary = "\n".join(summary)
text = text5+summary
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
def saveModelSumm_pop(self,listindex):
text5 = "Model summary:\n"
summary = []
self.model_keras.summary(print_fn=summary.append)
summary = "\n".join(summary)
text = text5+summary
#Ask the user where to save the stuff
filename = QtWidgets.QFileDialog.getSaveFileName(self, 'Model summary', Default_dict["Path of last model"]," (*.txt)")
filename = filename[0]
#Save to this filename
f = open(filename,'w')
f.write(text)
f.close()
#class_weight = self.get_class_weight(self.fittingpopups_ui[listindex].SelectedFiles,lossW_expert) #
def get_class_weight(self,SelectedFiles,lossW_expert,custom_check_classes=False):
t1 = time.time()
print("Getting dictionary for class_weight")
if lossW_expert=="None":
return None
elif lossW_expert=="":
return None
elif lossW_expert=="Balanced":
#Which are training files?
ind = [selectedfile["TrainOrValid"] == "Train" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
SelectedFiles_train = list(np.array(SelectedFiles)[ind])
classes = [int(selectedfile["class"]) for selectedfile in SelectedFiles_train]
nr_events_epoch = [int(selectedfile["nr_events_epoch"]) for selectedfile in SelectedFiles_train]
classes_uni = np.unique(classes)
counter = {}
for class_ in classes_uni:
ind = np.where(np.array(classes)==class_)[0]
nr_events_epoch_class = np.array(nr_events_epoch)[ind]
counter[class_] = np.sum(nr_events_epoch_class)
max_val = float(max(counter.values()))
return {class_id : max_val/num_images for class_id, num_images in counter.items()}
elif lossW_expert.startswith("{"):#Custom loss weights
class_weights = eval(lossW_expert)
if custom_check_classes:#Check that each element in classes_uni is contained in class_weights.keys()
ind = [selectedfile["TrainOrValid"] == "Train" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
SelectedFiles_train = list(np.array(SelectedFiles)[ind])
classes = [int(selectedfile["class"]) for selectedfile in SelectedFiles_train]
classes_uni = np.unique(classes)
classes_uni = np.sort(classes_uni)
class_weights_keys = np.sort([int(a) for a in class_weights.keys()])
#each element in classes_uni has to be equal to class_weights_keys
equal = np.array_equal(classes_uni,class_weights_keys)
if equal == True:
return class_weights
else:
#If equal is False, the provided class weights do not match the classes present in the training data
#Run the function again, but request 'Balanced' weights as a fallback (it is debatable whether this should be the default)
class_weights = self.get_class_weight(SelectedFiles,"Balanced")
return ["Balanced",class_weights]
else:
return class_weights
t2 = time.time()
dt = np.round(t2-t1,2)
print("Comp. time = "+str(dt))
def accept_lr_range(self):
lr_start = str(self.popup_lrfinder_ui.lineEdit_LrMin.text())
lr_stop = str(self.popup_lrfinder_ui.lineEdit_LrMax.text())
if len(lr_start)>0 and len(lr_stop)>0:
self.lineEdit_cycLrMin.setText(lr_start)
self.lineEdit_cycLrMax.setText(lr_stop)
else:
print("Found no values for LR range")
def accept_lr_value(self):
single_lr = self.popup_lrfinder_ui.lineEdit_singleLr.text()
if len(single_lr)>0:
lr_value = float(single_lr)
self.doubleSpinBox_learningRate.setValue(lr_value)
self.doubleSpinBox_expDecInitLr.setValue(lr_value)
else:
print("Found no value for single LR!")
def reset_lr_settings(self):
self.popup_lrfinder_ui.lineEdit_startLr.setText(_translate("Form_LrFinder", "1e-10", None))
self.popup_lrfinder_ui.lineEdit_stopLr.setText(_translate("Form_LrFinder", "0.1", None))
self.popup_lrfinder_ui.doubleSpinBox_percDataT.setProperty("value", 100.0)
self.popup_lrfinder_ui.doubleSpinBox_percDataV.setProperty("value", 100.0)
self.popup_lrfinder_ui.spinBox_batchSize.setValue(Default_dict["spinBox_batchSize"])
self.popup_lrfinder_ui.spinBox_lineWidth.setProperty("value", 6)
self.popup_lrfinder_ui.spinBox_epochs.setProperty("value", 5)
def reset_lr_value(self):
self.popup_lrfinder_ui.lineEdit_singleLr.setText("")
#Uncheck and Check the groupbox to refresh the line
self.popup_lrfinder_ui.groupBox_singleLr.setChecked(False)
self.popup_lrfinder_ui.groupBox_singleLr.setChecked(True)
def reset_lr_range(self):
self.popup_lrfinder_ui.lineEdit_LrMin.setText("")
self.popup_lrfinder_ui.lineEdit_LrMax.setText("")
#Uncheck and Check the groupbox to refresh the range
self.popup_lrfinder_ui.groupBox_LrRange.setChecked(False)
self.popup_lrfinder_ui.groupBox_LrRange.setChecked(True)
def popup_lr_finder(self):
SelectedFiles = self.items_clicked()
self.popup_lrfinder = MyPopup()
self.popup_lrfinder_ui = aid_frontend.popup_lrfinder()
self.popup_lrfinder_ui.setupUi(self.popup_lrfinder) #open a popup for lr finder
#Get information about the model
#check which radiobutton is clicked and copy the model name from there
if self.radioButton_NewModel.isChecked():
modelname = str(self.comboBox_ModelSelection.currentText())
if len(modelname)==0:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("No model specified!")
msg.setWindowTitle("No model specified!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
elif self.radioButton_LoadContinueModel.isChecked():
modelname = str(self.lineEdit_LoadModelPath.text())
elif self.radioButton_LoadRestartModel.isChecked():
modelname = str(self.lineEdit_LoadModelPath.text())
else:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Please specify a model using the radiobuttons on the 'Define Model' -tab")
msg.setWindowTitle("No model specified!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
in_dim = int(self.spinBox_imagecrop.value())
#Put information onto UI
self.popup_lrfinder_ui.lineEdit_loadModel.setText(modelname)
self.popup_lrfinder_ui.spinBox_Crop_inpImgSize.setValue(in_dim)
color_mode = self.get_color_mode()
self.popup_lrfinder_ui.comboBox_colorMode.addItem(color_mode)
loss_str = str(self.comboBox_expt_loss.currentText())
self.popup_lrfinder_ui.comboBox_expt_loss.addItem(loss_str)
optimizer_str = str(self.comboBox_optimizer.currentText())
self.popup_lrfinder_ui.comboBox_optimizer.addItem(optimizer_str)
batch_size = self.spinBox_batchSize.value()
self.popup_lrfinder_ui.spinBox_batchSize.setValue(batch_size)
#Connect action_lr_finder function to button
self.popup_lrfinder_ui.pushButton_LrFindRun.clicked.connect(lambda: self.action_initialize_model(duties="initialize_lrfind"))
self.popup_lrfinder_ui.pushButton_rangeAccept.clicked.connect(self.accept_lr_range)
self.popup_lrfinder_ui.pushButton_singleAccept.clicked.connect(self.accept_lr_value)
self.popup_lrfinder_ui.pushButton_LrReset.clicked.connect(self.reset_lr_settings)
self.popup_lrfinder_ui.pushButton_singleReset.clicked.connect(self.reset_lr_value)
self.popup_lrfinder_ui.pushButton_rangeReset.clicked.connect(self.reset_lr_range)
#Update the plot when any plotting option is changed
self.popup_lrfinder_ui.comboBox_metric.currentIndexChanged.connect(self.update_lrfind_plot)
self.popup_lrfinder_ui.spinBox_lineWidth.valueChanged.connect(self.update_lrfind_plot)
self.popup_lrfinder_ui.checkBox_smooth.toggled.connect(self.update_lrfind_plot)
#LR single value when groupbox is toggled
self.popup_lrfinder_ui.groupBox_singleLr.toggled.connect(self.get_lr_single)
#LR range when groupbox is toggled
self.popup_lrfinder_ui.groupBox_LrRange.toggled.connect(self.get_lr_range)
#compute the number of steps/epoch
ind = [selectedfile["TrainOrValid"] == "Train" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
SelectedFiles_train = np.array(SelectedFiles)[ind]
SelectedFiles_train = list(SelectedFiles_train)
nr_events_train_total = np.sum([int(selectedfile["nr_events_epoch"]) for selectedfile in SelectedFiles_train])
def update_stepsPerEpoch():
batch_size = self.popup_lrfinder_ui.spinBox_batchSize.value()
perc_data = self.popup_lrfinder_ui.doubleSpinBox_percDataT.value()
nr_events = (perc_data/100)*nr_events_train_total
stepsPerEpoch = np.ceil(nr_events / float(batch_size))
self.popup_lrfinder_ui.spinBox_stepsPerEpoch.setValue(int(stepsPerEpoch))
update_stepsPerEpoch()
self.popup_lrfinder_ui.spinBox_batchSize.valueChanged.connect(update_stepsPerEpoch)
self.popup_lrfinder_ui.doubleSpinBox_percDataT.valueChanged.connect(update_stepsPerEpoch)
self.popup_lrfinder.show()
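#Sketch of the steps/epoch arithmetic used in update_stepsPerEpoch() above
#(illustration only; the numbers are assumed):
#  nr_events_train_total = 10000, perc_data = 100 %, batch_size = 32
#  nr_events     = (100/100)*10000 = 10000
#  stepsPerEpoch = ceil(10000/32)  = 313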
def popup_clr_settings(self,listindex):
if listindex==-1:
item_ui = self
else:
item_ui = self.fittingpopups_ui[listindex]
item_ui.popup_clrsettings = MyPopup()
item_ui.popup_clrsettings_ui = aid_frontend.Ui_Clr_settings()
item_ui.popup_clrsettings_ui.setupUi(item_ui.popup_clrsettings) #open a popup for the cyclical learning rate settings
##Manual insertion##
item_ui.popup_clrsettings_ui.spinBox_stepSize.setProperty("value", item_ui.clr_settings["step_size"])
item_ui.popup_clrsettings_ui.doubleSpinBox_gamma.setProperty("value", item_ui.clr_settings["gamma"])
def clr_settings_ok():
step_size = int(item_ui.popup_clrsettings_ui.spinBox_stepSize.value())
gamma = float(item_ui.popup_clrsettings_ui.doubleSpinBox_gamma.value())
item_ui.clr_settings["step_size"] = step_size #Number of epochs to fulfill half a cycle
item_ui.clr_settings["gamma"] = gamma #gamma factor for Exponential decrease method (exp_range)
print("Settings for cyclical learning rates were changed.")
#close the popup
item_ui.popup_clrsettings = None
item_ui.popup_clrsettings_ui = None
def clr_settings_cancel():#close the popup
item_ui.popup_clrsettings = None
item_ui.popup_clrsettings_ui = None
item_ui.popup_clrsettings_ui.pushButton_ok.clicked.connect(clr_settings_ok)
item_ui.popup_clrsettings_ui.pushButton_cancel.clicked.connect(clr_settings_cancel)
item_ui.popup_clrsettings.show()
def popup_lr_plot(self,listindex):
if listindex==-1:
item_ui = self
else:
item_ui = self.fittingpopups_ui[listindex]
item_ui.popup_lrplot = MyPopup()
item_ui.popup_lrplot_ui = aid_frontend.popup_lrplot()
item_ui.popup_lrplot_ui.setupUi(item_ui.popup_lrplot) #open a popup for lr plotting
#compute total number of epochs that will be fitted
spinBox_NrEpochs = item_ui.spinBox_NrEpochs.value() #my own loop
spinBox_epochs = item_ui.spinBox_epochs.value() #inside model.fit()
nr_epochs = spinBox_NrEpochs*spinBox_epochs
item_ui.popup_lrplot_ui.spinBox_totalEpochs.setValue(nr_epochs)
#Get the number of training examples
SelectedFiles = self.items_clicked()
ind = [selectedfile["TrainOrValid"] == "Train" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
SelectedFiles_train = np.array(SelectedFiles)[ind]
SelectedFiles_train = list(SelectedFiles_train)
nr_events_train_total = np.sum([int(selectedfile["nr_events_epoch"]) for selectedfile in SelectedFiles_train])
if nr_events_train_total==0 and item_ui.radioButton_LrConst.isChecked()==False:
#for Cyclical learning rates and Exponential learning rates, the
#number of training images is needed
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("There is no training data. Nr. of training images is required for this plot.")
msg.setWindowTitle("Nr. of training images = 0")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
text_info = ""
if item_ui.radioButton_LrConst.isChecked():
text_info+="Constant learning rate\n"
epochs_plot = np.array(range(nr_epochs))
const_lr = float(self.doubleSpinBox_learningRate.value())
learningrates = np.repeat(const_lr,nr_epochs)
elif item_ui.radioButton_LrCycl.isChecked():
text_info+="Cyclical learning rates\n"
base_lr = float(item_ui.lineEdit_cycLrMin.text())
max_lr = float(item_ui.lineEdit_cycLrMax.text())
batch_size = int(item_ui.spinBox_batchSize.value())
step_size = item_ui.clr_settings["step_size"] #number of epochs in half a cycle
step_size_ = step_size*int(np.round(nr_events_train_total / batch_size))#number of batch updates in half a cycle
mode = str(item_ui.comboBox_cycLrMethod.currentText())
clr_iterations = nr_epochs*int(np.round(nr_events_train_total / batch_size))#total number of batch updates (iterations)
nr_cycles = (clr_iterations/step_size_)/2.0#number of cycles
gamma = item_ui.clr_settings["gamma"] #gamma factor for the exp_range
#Generate text to display the settings used
text_info+="Nr. of training images: "+str(nr_events_train_total)+"\n"
text_info+="base_lr: "+str(base_lr)+"\n"
text_info+="max_lr: "+str(max_lr)+"\n"
text_info+="batch_size: "+str(batch_size)+"\n"
text_info+="mode: "+str(mode)+"\n"
text_info+="gamma: "+str(gamma)+"\n"
text_info+="Nr. of epochs to fulfill one cycle: "+str(2*step_size)+"\n"
#text_info+="Total nr. of lr adjustmend: "+str(step_size_)+"\n"
text_info+="Total nr. of lr adjustments: "+str(clr_iterations)+"\n"
text_info+="Total nr. of cycles: "+str(nr_cycles)+"\n"
#Request the learning rates from the class cyclicLR
clr_iterations = np.arange(clr_iterations)
clr_1 = aid_dl.cyclicLR(base_lr=base_lr,max_lr=max_lr,step_size=step_size_,mode=mode,gamma=gamma)
clr_1.clr_iterations=clr_iterations#pass the number of clr iterations to the class
learningrates = clr_1.clr() #compute the learning rates for each iteration
#convert clr_iterations back to "epochs"
epochs_plot = clr_iterations/int(np.round(nr_events_train_total / batch_size))
elif item_ui.radioButton_LrExpo.isChecked():
text_info+="Exponentially decreased learning rates\n"
initial_lr = float(item_ui.doubleSpinBox_expDecInitLr.value())
decay_steps = int(item_ui.spinBox_expDecSteps.value())
decay_rate = float(item_ui.doubleSpinBox_expDecRate.value())
batch_size = int(item_ui.spinBox_batchSize.value())
text_info+="Nr. of training images: "+str(nr_events_train_total)+"\n"
text_info+="initial_lr: "+str(initial_lr)+"\n"
text_info+="decay_steps: "+str(decay_steps)+"\n"
text_info+="decay_rate: "+str(decay_rate)+"\n"
#epochs_plot = np.array(range(nr_epochs))
epochs_plot = nr_epochs * int(np.round(nr_events_train_total / batch_size))
epochs_plot = np.arange(epochs_plot)
exp_decay = aid_dl.exponentialDecay(initial_lr=initial_lr, decay_steps=decay_steps, decay_rate=decay_rate)
exp_decay.iterations=epochs_plot#pass the iterations to the class
learningrates = exp_decay.exp_decay()
epochs_plot = epochs_plot/int(np.round(nr_events_train_total / batch_size))
#learningrates = aid_dl.exponentialDecay(epochs_plot,initial_lr=initial_lr, decay_steps=decay_steps, decay_rate=decay_rate)
def refreshPlot():
try: # try to empty the plot
item_ui.popup_lrplot_ui.lr_plot.removeItem(item_ui.lr_line2)
except Exception:
pass
#Get design settings
color = item_ui.popup_lrplot_ui.pushButton_color.palette().button().color()
width = int(item_ui.popup_lrplot_ui.spinBox_lineWidth.value())
color = list(color.getRgb())
color = tuple(color)
pencolor=pg.mkPen(color, width=width)
#define curve and add to plot
item_ui.lr_line2 = pg.PlotCurveItem(x=epochs_plot, y=learningrates,pen=pencolor)
item_ui.popup_lrplot_ui.lr_plot.addItem(item_ui.lr_line2)
refreshPlot()
item_ui.popup_lrplot_ui.pushButton_refreshPlot.clicked.connect(refreshPlot)
item_ui.popup_lrplot_ui.textBrowser_lrSettings.setText(text_info)
item_ui.popup_lrplot.show()
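#Numerical sketch of the cyclical-LR bookkeeping above (values are assumed, purely for illustration):
#  nr_events_train_total = 10000, batch_size = 100 -> 100 iterations per epoch
#  clr_settings["step_size"] = 4 epochs            -> step_size_ = 4*100 = 400 iterations (half a cycle)
#  nr_epochs = 20                                  -> clr_iterations = 20*100 = 2000 iterations
#  nr_cycles = (2000/400)/2 = 2.5 cycles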
def lossWeights_activated(self,on_or_off,listindex):
if listindex==-1:
item_ui = self
else:
item_ui = self.fittingpopups_ui[listindex]
if on_or_off==False:#0 means switched OFF
item_ui.lineEdit_lossW.setText("")
item_ui.pushButton_lossW.setEnabled(False)
#this happens when the user activated the expert option "loss weights"
elif on_or_off==True:#2 means switched ON
#Activate button
item_ui.pushButton_lossW.setEnabled(True)
self.lossWeights_popup(listindex)
def lossWeights_popup(self,listindex):
if listindex==-1:
item_ui = self
SelectedFiles = self.items_clicked()
else:
item_ui = self.fittingpopups_ui[listindex]
SelectedFiles = item_ui.SelectedFiles
item_ui.popup_lossW = MyPopup()
item_ui.popup_lossW_ui = aid_frontend.popup_lossweights()
item_ui.popup_lossW_ui.setupUi(item_ui.popup_lossW) #open a popup to show the numbers of events in each class in a table
indices = [SelectedFiles[i]["class"] for i in range(len(SelectedFiles))]
#Initiate the table with 5 columns: ["Class","Events tot.","Events/Epoch","Events/Epoch[%]","Loss weight"]
item_ui.popup_lossW_ui.tableWidget_lossW.setColumnCount(5)
nr_ind = len(set(indices)) #each index could occur for train and valid
nr_rows = nr_ind
item_ui.popup_lossW_ui.tableWidget_lossW.setRowCount(nr_rows)
#Which selected file has the most features?
header_labels = ["Class", "Events tot." ,"Events/Epoch", "Events/Epoch[%]", "Loss weight"]
item_ui.popup_lossW_ui.tableWidget_lossW.setHorizontalHeaderLabels(header_labels)
header = item_ui.popup_lossW_ui.tableWidget_lossW.horizontalHeader()
for i in range(len(header_labels)):
header.setResizeMode(i, QtWidgets.QHeaderView.ResizeToContents)
#Fill the table
rowPosition = 0
#Training info
ind = [selectedfile["TrainOrValid"] == "Train" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
SelectedFiles_train = np.array(SelectedFiles)[ind]
SelectedFiles_train = list(SelectedFiles_train)
indices_train = [selectedfile["class"] for selectedfile in SelectedFiles_train]
nr_events_train_total = np.sum([int(selectedfile["nr_events_epoch"]) for selectedfile in SelectedFiles_train])
#Total nr of cells for each index
for index in np.unique(indices_train):
colPos = 0 #"Class" #put the index (class!) in column nr. 0
item = QtWidgets.QTableWidgetItem()
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
item.setData(QtCore.Qt.EditRole,str(index))
item_ui.popup_lossW_ui.tableWidget_lossW.setItem(rowPosition, colPos, item)
#Get the training files of that index
ind = np.where(indices_train==index)[0]
SelectedFiles_train_index = np.array(SelectedFiles_train)[ind]
colPos = 1 #"Events tot."
nr_events = [int(selectedfile["nr_events"]) for selectedfile in SelectedFiles_train_index]
item = QtWidgets.QTableWidgetItem()
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
item.setData(QtCore.Qt.EditRole, str(np.sum(nr_events)))
item_ui.popup_lossW_ui.tableWidget_lossW.setItem(rowPosition, colPos, item)
colPos = 2 #"Events/Epoch"
nr_events_epoch = [int(selectedfile["nr_events_epoch"]) for selectedfile in SelectedFiles_train_index]
item = QtWidgets.QTableWidgetItem()
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
item.setData(QtCore.Qt.EditRole, str(np.sum(nr_events_epoch)))
item_ui.popup_lossW_ui.tableWidget_lossW.setItem(rowPosition, colPos, item)
colPos = 3 #"Events/Epoch[%]"
item = QtWidgets.QTableWidgetItem()
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
item.setData(QtCore.Qt.EditRole, str(np.round(np.sum(nr_events_epoch)/float(nr_events_train_total),2)))
item_ui.popup_lossW_ui.tableWidget_lossW.setItem(rowPosition, colPos, item)
colPos = 4 #"Loss weights"
#for each class create a spinbox that holds the loss weight
spinb = QtWidgets.QDoubleSpinBox(item_ui.popup_lossW_ui.tableWidget_lossW)
spinb.setEnabled(False)
spinb.setMinimum(-99999)
spinb.setMaximum(99999)
spinb.setSingleStep(0.1)
spinb.setValue(1.0) #Default in Keras is "None", which means class_weight=1.0
item_ui.popup_lossW_ui.tableWidget_lossW.setCellWidget(rowPosition, colPos, spinb)
rowPosition += 1
item_ui.popup_lossW_ui.tableWidget_lossW.resizeColumnsToContents()
item_ui.popup_lossW_ui.tableWidget_lossW.resizeRowsToContents()
item_ui.popup_lossW.show()
item_ui.popup_lossW_ui.pushButton_pop_lossW_cancel.clicked.connect(lambda: self.lossW_cancel(listindex))
item_ui.popup_lossW_ui.pushButton_pop_lossW_ok.clicked.connect(lambda: self.lossW_ok(listindex))
item_ui.popup_lossW_ui.comboBox_lossW.currentIndexChanged.connect(lambda on_or_off: self.lossW_comboB(on_or_off,listindex))
def optimizer_change_settings_popup(self,listindex):
if listindex==-1:
item_ui = self
else:
item_ui = self.fittingpopups_ui[listindex]
item_ui.popup_optim = MyPopup()
item_ui.popup_optim_ui = aid_frontend.Ui_Form_expt_optim()
item_ui.popup_optim_ui.setupUi(item_ui.popup_optim) #open a popup to show advances settings for optimizer
##Manual insertion##
optimizer_name = item_ui.optimizer_settings["comboBox_optimizer"].lower()
if optimizer_name=='sgd':
item_ui.popup_optim_ui.radioButton_sgd.setChecked(True)
elif optimizer_name=='rmsprop':
item_ui.popup_optim_ui.radioButton_rms.setChecked(True)
elif optimizer_name=='adagrad':
item_ui.popup_optim_ui.radioButton_adagrad.setChecked(True)
elif optimizer_name=='adadelta':
item_ui.popup_optim_ui.radioButton_adadelta.setChecked(True)
elif optimizer_name=='adam':
item_ui.popup_optim_ui.radioButton_adam.setChecked(True)
elif optimizer_name=='adamax':
item_ui.popup_optim_ui.radioButton_adamax.setChecked(True)
elif optimizer_name=='nadam':
item_ui.popup_optim_ui.radioButton_nadam.setChecked(True)
item_ui.popup_optim_ui.doubleSpinBox_lr_sgd.setValue(item_ui.optimizer_settings["doubleSpinBox_lr_sgd"])
item_ui.popup_optim_ui.doubleSpinBox_sgd_momentum.setValue(item_ui.optimizer_settings["doubleSpinBox_sgd_momentum"])
item_ui.popup_optim_ui.checkBox_sgd_nesterov.setChecked(item_ui.optimizer_settings["checkBox_sgd_nesterov"])
item_ui.popup_optim_ui.doubleSpinBox_lr_rmsprop.setValue(item_ui.optimizer_settings["doubleSpinBox_lr_rmsprop"])
item_ui.popup_optim_ui.doubleSpinBox_rms_rho.setValue(item_ui.optimizer_settings["doubleSpinBox_rms_rho"])
item_ui.popup_optim_ui.doubleSpinBox_lr_adam.setValue(item_ui.optimizer_settings["doubleSpinBox_lr_adam"])
item_ui.popup_optim_ui.doubleSpinBox_adam_beta1.setValue(item_ui.optimizer_settings["doubleSpinBox_adam_beta1"])
item_ui.popup_optim_ui.doubleSpinBox_adam_beta2.setValue(item_ui.optimizer_settings["doubleSpinBox_adam_beta2"])
item_ui.popup_optim_ui.checkBox_adam_amsgrad.setChecked(item_ui.optimizer_settings["checkBox_adam_amsgrad"])
item_ui.popup_optim_ui.doubleSpinBox_lr_nadam.setValue(item_ui.optimizer_settings["doubleSpinBox_lr_nadam"])
item_ui.popup_optim_ui.doubleSpinBox_nadam_beta1.setValue(item_ui.optimizer_settings["doubleSpinBox_nadam_beta1"])
item_ui.popup_optim_ui.doubleSpinBox_nadam_beta2.setValue(item_ui.optimizer_settings["doubleSpinBox_nadam_beta2"])
item_ui.popup_optim_ui.doubleSpinBox_lr_adadelta.setValue(item_ui.optimizer_settings["doubleSpinBox_lr_adadelta"])
item_ui.popup_optim_ui.doubleSpinBox_adadelta_rho.setValue(item_ui.optimizer_settings["doubleSpinBox_adadelta_rho"])
item_ui.popup_optim_ui.doubleSpinBox_lr_adagrad.setValue(item_ui.optimizer_settings["doubleSpinBox_lr_adagrad"])
item_ui.popup_optim_ui.doubleSpinBox_lr_adamax.setValue(item_ui.optimizer_settings["doubleSpinBox_lr_adamax"])
item_ui.popup_optim_ui.doubleSpinBox_adamax_beta1.setValue(item_ui.optimizer_settings["doubleSpinBox_adamax_beta1"])
item_ui.popup_optim_ui.doubleSpinBox_adamax_beta2.setValue(item_ui.optimizer_settings["doubleSpinBox_adamax_beta2"])
def change_lr(lr):
item_ui.doubleSpinBox_learningRate.setValue(lr)
item_ui.doubleSpinBox_expDecInitLr.setValue(lr)
item_ui.popup_optim_ui.doubleSpinBox_lr_adam.valueChanged.connect(change_lr)
item_ui.popup_optim_ui.doubleSpinBox_lr_sgd.valueChanged.connect(change_lr)
item_ui.popup_optim_ui.doubleSpinBox_lr_rmsprop.valueChanged.connect(change_lr)
item_ui.popup_optim_ui.doubleSpinBox_lr_adagrad.valueChanged.connect(change_lr)
item_ui.popup_optim_ui.doubleSpinBox_lr_adadelta.valueChanged.connect(change_lr)
item_ui.popup_optim_ui.doubleSpinBox_lr_adamax.valueChanged.connect(change_lr)
item_ui.popup_optim_ui.doubleSpinBox_lr_nadam.valueChanged.connect(change_lr)
def change_optimizer(optimizer_name):
index = item_ui.comboBox_optimizer.findText(optimizer_name, QtCore.Qt.MatchFixedString)
if index >= 0:
item_ui.comboBox_optimizer.setCurrentIndex(index)
#get the learning rate for that optimizer
lr = item_ui.optimizer_settings["doubleSpinBox_lr_"+optimizer_name.lower()]
change_lr(lr)
item_ui.popup_optim_ui.radioButton_adam.toggled.connect(lambda: change_optimizer("Adam"))
item_ui.popup_optim_ui.radioButton_sgd.toggled.connect(lambda: change_optimizer("SGD"))
item_ui.popup_optim_ui.radioButton_rms.toggled.connect(lambda: change_optimizer("RMSprop"))
item_ui.popup_optim_ui.radioButton_adagrad.toggled.connect(lambda: change_optimizer("Adagrad"))
item_ui.popup_optim_ui.radioButton_adadelta.toggled.connect(lambda: change_optimizer("Adadelta"))
item_ui.popup_optim_ui.radioButton_adamax.toggled.connect(lambda: change_optimizer("Adamax"))
item_ui.popup_optim_ui.radioButton_nadam.toggled.connect(lambda: change_optimizer("Nadam"))
def ok():
doubleSpinBox_lr_sgd = float(item_ui.popup_optim_ui.doubleSpinBox_lr_sgd.value())
doubleSpinBox_sgd_momentum = float(item_ui.popup_optim_ui.doubleSpinBox_sgd_momentum.value())
checkBox_sgd_nesterov = bool(item_ui.popup_optim_ui.checkBox_sgd_nesterov.isChecked())
doubleSpinBox_lr_rmsprop = float(item_ui.popup_optim_ui.doubleSpinBox_lr_rmsprop.value())
doubleSpinBox_rms_rho = float(item_ui.popup_optim_ui.doubleSpinBox_rms_rho.value())
doubleSpinBox_lr_adam = float(item_ui.popup_optim_ui.doubleSpinBox_lr_adam.value())
doubleSpinBox_adam_beta1 = float(item_ui.popup_optim_ui.doubleSpinBox_adam_beta1.value())
doubleSpinBox_adam_beta2 = float(item_ui.popup_optim_ui.doubleSpinBox_adam_beta2.value())
checkBox_adam_amsgrad = bool(item_ui.popup_optim_ui.checkBox_adam_amsgrad.isChecked())
doubleSpinBox_lr_adadelta = float(item_ui.popup_optim_ui.doubleSpinBox_lr_adadelta.value())
doubleSpinBox_adadelta_rho = float(item_ui.popup_optim_ui.doubleSpinBox_adadelta_rho.value())
doubleSpinBox_lr_nadam = float(item_ui.popup_optim_ui.doubleSpinBox_lr_nadam.value())
doubleSpinBox_nadam_beta1 = float(item_ui.popup_optim_ui.doubleSpinBox_nadam_beta1.value())
doubleSpinBox_nadam_beta2 = float(item_ui.popup_optim_ui.doubleSpinBox_nadam_beta2.value())
doubleSpinBox_lr_adagrad = float(item_ui.popup_optim_ui.doubleSpinBox_lr_adagrad.value())
doubleSpinBox_lr_adamax = float(item_ui.popup_optim_ui.doubleSpinBox_lr_adamax.value())
doubleSpinBox_adamax_beta2 = float(item_ui.popup_optim_ui.doubleSpinBox_adamax_beta2.value())
doubleSpinBox_adamax_beta1 = float(item_ui.popup_optim_ui.doubleSpinBox_adamax_beta1.value())
item_ui.optimizer_settings["doubleSpinBox_lr_sgd"] = doubleSpinBox_lr_sgd
item_ui.optimizer_settings["doubleSpinBox_sgd_momentum"] = doubleSpinBox_sgd_momentum
item_ui.optimizer_settings["checkBox_sgd_nesterov"] = checkBox_sgd_nesterov
item_ui.optimizer_settings["doubleSpinBox_lr_rmsprop"] = doubleSpinBox_lr_rmsprop
item_ui.optimizer_settings["doubleSpinBox_rms_rho"] = doubleSpinBox_rms_rho
item_ui.optimizer_settings["doubleSpinBox_lr_adam"] = doubleSpinBox_lr_adam
item_ui.optimizer_settings["doubleSpinBox_adam_beta1"] = doubleSpinBox_adam_beta1
item_ui.optimizer_settings["doubleSpinBox_adam_beta2"] = doubleSpinBox_adam_beta2
item_ui.optimizer_settings["checkBox_adam_amsgrad"] = checkBox_adam_amsgrad
item_ui.optimizer_settings["doubleSpinBox_lr_adadelta"] = doubleSpinBox_lr_adadelta
item_ui.optimizer_settings["doubleSpinBox_adadelta_rho"] = doubleSpinBox_adadelta_rho
item_ui.optimizer_settings["doubleSpinBox_lr_nadam"] = doubleSpinBox_lr_nadam
item_ui.optimizer_settings["doubleSpinBox_nadam_beta1"] = doubleSpinBox_nadam_beta1
item_ui.optimizer_settings["doubleSpinBox_nadam_beta2"] = doubleSpinBox_nadam_beta2
item_ui.optimizer_settings["doubleSpinBox_lr_adagrad"] = doubleSpinBox_lr_adagrad
item_ui.optimizer_settings["doubleSpinBox_lr_adamax"] = doubleSpinBox_lr_adamax
item_ui.optimizer_settings["doubleSpinBox_adamax_beta1"] = doubleSpinBox_adamax_beta1
item_ui.optimizer_settings["doubleSpinBox_adamax_beta2"] = doubleSpinBox_adamax_beta2
#close the popup
item_ui.popup_optim = None
item_ui.popup_optim_ui = None
print("Advanced settings for optimizer were changed.")
def cancel():#close the popup
item_ui.popup_optim = None
item_ui.popup_optim_ui = None
def reset():
print("Reset optimizer settings (in UI). To accept, click OK")
optimizer_default = aid_dl.get_optimizer_settings()
item_ui.popup_optim_ui.doubleSpinBox_lr_sgd.setValue(optimizer_default["doubleSpinBox_lr_sgd"])
item_ui.popup_optim_ui.doubleSpinBox_sgd_momentum.setValue(optimizer_default["doubleSpinBox_sgd_momentum"])
item_ui.popup_optim_ui.checkBox_sgd_nesterov.setChecked(optimizer_default["checkBox_sgd_nesterov"])
item_ui.popup_optim_ui.doubleSpinBox_lr_rmsprop.setValue(optimizer_default["doubleSpinBox_lr_rmsprop"])
item_ui.popup_optim_ui.doubleSpinBox_rms_rho.setValue(optimizer_default["doubleSpinBox_rms_rho"])
item_ui.popup_optim_ui.doubleSpinBox_lr_adam.setValue(optimizer_default["doubleSpinBox_lr_adam"])
item_ui.popup_optim_ui.doubleSpinBox_adam_beta1.setValue(optimizer_default["doubleSpinBox_adam_beta1"])
item_ui.popup_optim_ui.doubleSpinBox_adam_beta2.setValue(optimizer_default["doubleSpinBox_adam_beta2"])
item_ui.popup_optim_ui.checkBox_adam_amsgrad.setChecked(optimizer_default["checkBox_adam_amsgrad"])
item_ui.popup_optim_ui.doubleSpinBox_lr_nadam.setValue(optimizer_default["doubleSpinBox_lr_nadam"])
item_ui.popup_optim_ui.doubleSpinBox_nadam_beta1.setValue(optimizer_default["doubleSpinBox_nadam_beta1"])
item_ui.popup_optim_ui.doubleSpinBox_nadam_beta2.setValue(optimizer_default["doubleSpinBox_nadam_beta2"])
item_ui.popup_optim_ui.doubleSpinBox_lr_adadelta.setValue(optimizer_default["doubleSpinBox_lr_adadelta"])
item_ui.popup_optim_ui.doubleSpinBox_adadelta_rho.setValue(optimizer_default["doubleSpinBox_adadelta_rho"])
item_ui.popup_optim_ui.doubleSpinBox_lr_adagrad.setValue(optimizer_default["doubleSpinBox_lr_adagrad"])
item_ui.popup_optim_ui.doubleSpinBox_lr_adamax.setValue(optimizer_default["doubleSpinBox_lr_adamax"])
item_ui.popup_optim_ui.doubleSpinBox_adamax_beta1.setValue(optimizer_default["doubleSpinBox_adamax_beta1"])
item_ui.popup_optim_ui.doubleSpinBox_adamax_beta2.setValue(optimizer_default["doubleSpinBox_adamax_beta2"])
item_ui.popup_optim_ui.pushButton_ok.clicked.connect(ok)
item_ui.popup_optim_ui.pushButton_cancel.clicked.connect(cancel)
item_ui.popup_optim_ui.pushButton_reset.clicked.connect(reset)
item_ui.popup_optim.show()
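#The popup above reads and writes item_ui.optimizer_settings, a plain dict keyed by the
#corresponding widget names; excerpt for illustration only (the values shown are assumed):
#  {"comboBox_optimizer": "Adam",
#   "doubleSpinBox_lr_adam": 0.001,
#   "doubleSpinBox_adam_beta1": 0.9,
#   "doubleSpinBox_adam_beta2": 0.999,
#   "checkBox_adam_amsgrad": False, ...}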
def onLayoutChange(self,app):
#Get the text of the triggered layout
layout_trig = (self.sender().text()).split(" layout")[0]
layout_current = Default_dict["Layout"]
if layout_trig == layout_current:
self.statusbar.showMessage(layout_current+" layout is already in use",2000)
return
elif layout_trig == "Normal":
#Change Layout in Defaultdict to "Normal", such that next start will use Normal layout
Default_dict["Layout"] = "Normal"
app.setStyleSheet("")
#Standard is with tooltip
self.actionTooltipOnOff.setChecked(True)
elif layout_trig == "Dark":
#Change Layout in Defaultdict to "Dark", such that next start will use Dark layout
Default_dict["Layout"] = "Dark"
dir_layout = os.path.join(dir_root,"layout_dark.txt")#dir to settings
f = open(dir_layout, "r") #I obtained the layout file from: https://github.com/ColinDuquesnoy/QDarkStyleSheet/blob/master/qdarkstyle/style.qss
f = f.read()
app.setStyleSheet(f)
#Standard is with tooltip
self.actionTooltipOnOff.setChecked(True)
elif layout_trig == "DarkOrange":
#Change Layout in Defaultdict to "Dark", such that next start will use Dark layout
Default_dict["Layout"] = "DarkOrange"
dir_layout = os.path.join(dir_root,"layout_darkorange.txt")#dir to settings
f = open(dir_layout, "r") #I obtained the layout file from: https://github.com/nphase/qt-ping-grapher/blob/master/resources/darkorange.stylesheet
f = f.read()
app.setStyleSheet(f)
#Standard is with tooltip
self.actionTooltipOnOff.setChecked(True)
#Save the layout to Default_dict
with open(dir_settings, 'w') as f:
json.dump(Default_dict,f)
def onTooltipOnOff(self,app):
#what is the current layout?
if bool(self.actionLayout_Normal.isChecked())==True: #use normal layout
if bool(self.actionTooltipOnOff.isChecked())==True: #with tooltips
app.setStyleSheet("")
elif bool(self.actionTooltipOnOff.isChecked())==False: #no tooltips
app.setStyleSheet("""QToolTip {
opacity: 0
}""")
elif bool(self.actionLayout_Dark.isChecked())==True: #use dark layout
if bool(self.actionTooltipOnOff.isChecked())==True: #with tooltips
dir_layout = os.path.join(dir_root,"layout_dark.txt")#dir to settings
f = open(dir_layout, "r") #I obtained the layout file from: https://github.com/ColinDuquesnoy/QDarkStyleSheet/blob/master/qdarkstyle/style.qss
f = f.read()
app.setStyleSheet(f)
elif bool(self.actionTooltipOnOff.isChecked())==False: #no tooltips
dir_layout = os.path.join(dir_root,"layout_dark_notooltip.txt")#dir to settings
f = open(dir_layout, "r")#I obtained the layout file from: https://github.com/ColinDuquesnoy/QDarkStyleSheet/blob/master/qdarkstyle/style.qss
f = f.read()
app.setStyleSheet(f)
elif bool(self.actionLayout_DarkOrange.isChecked())==True: #use darkorange layout
if bool(self.actionTooltipOnOff.isChecked())==True: #with tooltips
dir_layout = os.path.join(dir_root,"layout_darkorange.txt")#dir to settings
f = open(dir_layout, "r") #I obtained the layout file from: https://github.com/nphase/qt-ping-grapher/blob/master/resources/darkorange.stylesheet
f = f.read()
app.setStyleSheet(f)
elif bool(self.actionTooltipOnOff.isChecked())==False: #no tooltips
dir_layout = os.path.join(dir_root,"layout_darkorange_notooltip.txt")#dir to settings
f = open(dir_layout, "r")
f = f.read()
app.setStyleSheet(f)
def onIconThemeChange(self):
#Get the text of the triggered icon theme
icontheme_trig = self.sender().text()
icontheme_current = Default_dict["Icon theme"]
if icontheme_trig == icontheme_current:
self.statusbar.showMessage(icontheme_current+" is already in use",2000)
return
elif icontheme_trig == "Icon theme 1":
Default_dict["Icon theme"] = "Icon theme 1"
self.statusbar.showMessage("Icon theme 1 will be used after restart",2000)
elif icontheme_trig == "Icon theme 2":
Default_dict["Icon theme"] = "Icon theme 2"
self.statusbar.showMessage("Icon theme 2 will be used after restart",2000)
#Save the layout to Default_dict
with open(dir_settings, 'w') as f:
json.dump(Default_dict,f)
def items_clicked(self):
#This function checks which data has been checked on table_dragdrop and returns the necessary data
rowCount = self.table_dragdrop.rowCount()
#Collect urls to files that are checked
SelectedFiles = []
for rowPosition in range(rowCount):
#get the filename/path
rtdc_path = str(self.table_dragdrop.cellWidget(rowPosition, 0).text())
#get the index (celltype) of it
index = int(self.table_dragdrop.cellWidget(rowPosition, 1).value())
#is it checked for train?
cb_t = self.table_dragdrop.item(rowPosition, 2)
#How many events does the dataset contain in total?
nr_events = int(self.table_dragdrop.item(rowPosition, 5).text())
#how many cells/epoch during training or validation?
nr_events_epoch = int(self.table_dragdrop.item(rowPosition, 6).text())
#should the dataset be randomized (shuffled?)
shuffle = bool(self.table_dragdrop.item(rowPosition, 8).checkState())
#should the images be zoomed in/out by a factor?
zoom_factor = float(self.table_dragdrop.item(rowPosition, 9).text())
#should xtra_data be used for training?
xtra_in = bool(self.table_dragdrop.item(rowPosition, 10).checkState())
if cb_t.checkState() == QtCore.Qt.Checked and nr_events_epoch>0: #add to training files if the user wants more than 0 images per epoch
failed,rtdc_ds = aid_bin.load_rtdc(rtdc_path)
if failed:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(rtdc_ds))
msg.setWindowTitle("Error occurred during loading file")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
hash_ = aid_bin.hashfunction(rtdc_path)#rtdc_ds.hash
features = list(rtdc_ds["events"].keys())
nr_images = rtdc_ds["events"]["image"].len()
SelectedFiles.append({"rtdc_ds":rtdc_ds,"rtdc_path":rtdc_path,"features":features,"nr_images":nr_images,"class":index,"TrainOrValid":"Train","nr_events":nr_events,"nr_events_epoch":nr_events_epoch,"shuffle":shuffle,"zoom_factor":zoom_factor,"hash":hash_,"xtra_in":xtra_in})
cb_v = self.table_dragdrop.item(rowPosition, 3)
if cb_v.checkState() == QtCore.Qt.Checked and nr_events_epoch>0:
failed,rtdc_ds = aid_bin.load_rtdc(rtdc_path)
if failed:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(rtdc_ds))
msg.setWindowTitle("Error occurred during loading file")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
hash_ = aid_bin.hashfunction(rtdc_path)
features = list(rtdc_ds["events"].keys())
nr_images = rtdc_ds["events"]["image"].len()
SelectedFiles.append({"rtdc_ds":rtdc_ds,"rtdc_path":rtdc_path,"features":features,"nr_images":nr_images,"class":index,"TrainOrValid":"Valid","nr_events":nr_events,"nr_events_epoch":nr_events_epoch,"shuffle":shuffle,"zoom_factor":zoom_factor,"hash":hash_,"xtra_in":xtra_in})
return SelectedFiles
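#Each entry of SelectedFiles is a plain dict describing one checked row; illustration only
#(the path and numbers below are made up):
#  {"rtdc_ds": <h5py.File>, "rtdc_path": "C:/data/blood.rtdc", "features": [...],
#   "nr_images": 5000, "class": 0, "TrainOrValid": "Train", "nr_events": 5000,
#   "nr_events_epoch": 1000, "shuffle": True, "zoom_factor": 1.0,
#   "hash": "...", "xtra_in": False}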
def items_available(self):
"""
Function grabs all information from table_dragdrop. Checked and Unchecked
Does not load rtdc_ds (save time)
"""
rowCount = self.table_dragdrop.rowCount()
#Collect urls to files that are checked
SelectedFiles = []
for rowPosition in range(rowCount):
#get the filename/path
rtdc_path = str(self.table_dragdrop.cellWidget(rowPosition, 0).text())
#get the index (celltype) of it
index = int(self.table_dragdrop.cellWidget(rowPosition, 1).value())
#How many events does the dataset contain in total?
nr_events = int(self.table_dragdrop.item(rowPosition, 5).text())
#how many cells/epoch during training or validation?
nr_events_epoch = int(self.table_dragdrop.item(rowPosition, 6).text())
#should the dataset be randomized (shuffled?)
shuffle = bool(self.table_dragdrop.item(rowPosition, 8).checkState())
#should the images be zoomed in/out by a factor?
zoom_factor = float(self.table_dragdrop.item(rowPosition, 9).text())
#should xtra_data be used for training?
xtra_in = bool(self.table_dragdrop.item(rowPosition, 10).checkState())
SelectedFiles.append({"rtdc_path":rtdc_path,"class":index,"TrainOrValid":"NotSpecified","nr_events":nr_events,"nr_events_epoch":nr_events_epoch,"shuffle":shuffle,"zoom_factor":zoom_factor,"xtra_in":xtra_in})
return SelectedFiles
def items_clicked_no_rtdc_ds(self):
#This function checks which data has been checked on table_dragdrop and returns the necessary data
rowCount = self.table_dragdrop.rowCount()
#Collect urls to files that are checked
SelectedFiles = []
for rowPosition in range(rowCount):
#get the filename/path
rtdc_path = str(self.table_dragdrop.cellWidget(rowPosition, 0).text())
#get the index (celltype) of it
index = int(self.table_dragdrop.cellWidget(rowPosition, 1).value())
#How many events does the dataset contain in total?
nr_events = int(self.table_dragdrop.item(rowPosition, 5).text())
#how many cells/epoch during training or validation?
nr_events_epoch = int(self.table_dragdrop.item(rowPosition, 6).text())
#should the dataset be randomized (shuffled?)
shuffle = bool(self.table_dragdrop.item(rowPosition, 8).checkState())
#should the images be zoomed in/out by a factor?
zoom_factor = float(self.table_dragdrop.item(rowPosition, 9).text())
#should xtra_data be used for training?
xtra_in = bool(self.table_dragdrop.item(rowPosition, 10).checkState())
#is it checked for train?
cb_t = self.table_dragdrop.item(rowPosition, 2)
if cb_t.checkState() == QtCore.Qt.Checked and nr_events_epoch>0: #add to training files if the user wants more than 0 images per epoch
#SelectedFiles.append({"nr_images":nr_events,"class":index,"TrainOrValid":"Train","nr_events":nr_events,"nr_events_epoch":nr_events_epoch})
SelectedFiles.append({"rtdc_path":rtdc_path,"class":index,"TrainOrValid":"Train","nr_events":nr_events,"nr_events_epoch":nr_events_epoch,"shuffle":shuffle,"zoom_factor":zoom_factor,"xtra_in":xtra_in})
cb_v = self.table_dragdrop.item(rowPosition, 3)
if cb_v.checkState() == QtCore.Qt.Checked and nr_events_epoch>0:
#SelectedFiles.append({"nr_images":nr_events,"class":index,"TrainOrValid":"Valid","nr_events":nr_events,"nr_events_epoch":nr_events_epoch})
SelectedFiles.append({"rtdc_path":rtdc_path,"class":index,"TrainOrValid":"Valid","nr_events":nr_events,"nr_events_epoch":nr_events_epoch,"shuffle":shuffle,"zoom_factor":zoom_factor,"xtra_in":xtra_in})
return SelectedFiles
def uncheck_if_zero(self,item):
#If the Nr. of events/epoch is changed to zero:
#uncheck the dataset for train/valid
row = item.row()
col = item.column()
#if the user changed Nr. of cells per epoch to zero
if col==6 and int(item.text())==0:
#get the checkstate of the corresponding T/V
cb_t = self.table_dragdrop.item(row, 2)
if cb_t.checkState() == QtCore.Qt.Checked:
cb_t.setCheckState(QtCore.Qt.Unchecked)
cb_v = self.table_dragdrop.item(row, 3)
if cb_v.checkState() == QtCore.Qt.Checked:
cb_v.setCheckState(QtCore.Qt.Unchecked)
def item_click(self,item):
colPosition = item.column()
rowPosition = item.row()
#if Shuffle was clicked (col=8), adjust the 'Events/Epoch' cell accordingly
if colPosition==8:
if bool(self.table_dragdrop.item(rowPosition, 8).checkState())==False:
rtdc_path = self.table_dragdrop.cellWidget(rowPosition, 0).text()
rtdc_path = str(rtdc_path)
failed,rtdc_ds = aid_bin.load_rtdc(rtdc_path)
if failed:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(rtdc_ds))
msg.setWindowTitle("Error occurred during loading file")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
nr_images = rtdc_ds["events"]["image"].len()
columnPosition = 6
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.DisplayRole, nr_images)
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
self.table_dragdrop.setItem(rowPosition, columnPosition, item)
if bool(self.table_dragdrop.item(rowPosition, 8).checkState())==True:
#If shuffle was unchecked before, the 'Events/Epoch' cell was grayed out; re-enable it now
item = self.table_dragdrop.item(rowPosition, 6)
item.setFlags(item.flags() |QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable )
if len(self.ram)>0:
self.statusbar.showMessage("Make sure to update RAM (->Edit->Data to RAM now) after changing Data-set",2000)
self.ram = dict() #clear the ram, since the data was changed
self.dataOverviewOn()
#When data is clicked, always reset the validation set (only important for 'Assess Model'-tab)
self.ValidationSet = None
self.Metrics = dict() #Also reset the metrics
def dataOverviewOn(self):
if self.groupBox_DataOverview.isChecked()==True:
if self.threadpool_single_queue == 0:
SelectedFiles = self.items_clicked_no_rtdc_ds()
self.update_data_overview(SelectedFiles)
self.update_data_overview_2(SelectedFiles)
def dataOverviewOn_OnChange(self,item):
#When a value is entered in 'Events/Epoch' and Enter is hit,
#the table is not updated automatically
if self.groupBox_DataOverview.isChecked()==True:
if self.threadpool_single_queue == 0:
rowPosition = item.row()
colPosition = item.column()
if colPosition==6:#when using the spinbox (Class) or entering a new number in "Events/Epoch", the table is not updated automatically
#get the new value
nr_cells = self.table_dragdrop.cellWidget(rowPosition, colPosition)
if nr_cells==None:
return
else:
SelectedFiles = self.items_clicked_no_rtdc_ds()
self.update_data_overview(SelectedFiles)
self.update_data_overview_2(SelectedFiles)
def update_data_overview(self,SelectedFiles):
#Check if there are custom class names (determined by user)
rows = self.tableWidget_Info.rowCount()
self.classes_custom = [] #by default assume there are no custom classes
classes_custom_bool = False
if rows>0:#if >0, then there is already a table existing
classes,self.classes_custom = [],[]
for row in range(rows):
try:
class_ = self.tableWidget_Info.item(row,0).text()
if class_.isdigit():
classes.append(class_)#get the classes
except:
pass
try:
self.classes_custom.append(self.tableWidget_Info.item(row,3).text())#get the classes
except:
pass
classes = np.unique(classes)
if len(classes)==len(self.classes_custom):#equal in length
same = [i for i, j in zip(classes, self.classes_custom) if i == j] #which items are identical?
if len(same)==0:
#apparently there are custom classes! Save them
classes_custom_bool = True
if len(SelectedFiles)==0:#reset the table
#Table1
#Prepare a table in tableWidget_Info
self.tableWidget_Info.setColumnCount(0)
self.tableWidget_Info.setRowCount(0)
self.tableWidget_Info.setColumnCount(4)
header = self.tableWidget_Info.horizontalHeader()
header_labels = ["Class","Events tot.","Events/Epoch","Name"]
self.tableWidget_Info.setHorizontalHeaderLabels(header_labels)
header = self.tableWidget_Info.horizontalHeader()
for i in range(4):
header.setResizeMode(i, QtWidgets.QHeaderView.ResizeToContents)
return
#Prepare a table in tableWidget_Info
self.tableWidget_Info.setColumnCount(0)
self.tableWidget_Info.setRowCount(0)
indices = [SelectedFiles[i]["class"] for i in range(len(SelectedFiles))]
self.tableWidget_Info.setColumnCount(4)
header = self.tableWidget_Info.horizontalHeader()
nr_ind = len(set(indices)) #each index could occur for train and valid
nr_rows = 2*nr_ind+2 #add two rows for intermediate headers (Train/Valid)
self.tableWidget_Info.setRowCount(nr_rows)
#Which selected file has the most features?
header_labels = ["Class","Events tot.","Events/Epoch","Name"]
self.tableWidget_Info.setHorizontalHeaderLabels(header_labels)
#self.tableWidget_Info.resizeColumnsToContents()
header = self.tableWidget_Info.horizontalHeader()
for i in range(4):
header.setResizeMode(i, QtWidgets.QHeaderView.ResizeToContents)
#Training info
rowPosition = 0
self.tableWidget_Info.setSpan(rowPosition, 0, 1, 2)
item = QtWidgets.QTableWidgetItem("Train. data")
item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
self.tableWidget_Info.setItem(rowPosition, 0, item)
rowPosition += 1
ind = [selectedfile["TrainOrValid"] == "Train" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
SelectedFiles_train = np.array(SelectedFiles)[ind]
SelectedFiles_train = list(SelectedFiles_train)
indices_train = [selectedfile["class"] for selectedfile in SelectedFiles_train]
classes = np.unique(indices_train)
if len(classes)==len(self.classes_custom):
classes_custom_bool = True
else:
classes_custom_bool = False
#display information for each individual class
for index_ in range(len(classes)):
#for index in np.unique(indices_train):
index = classes[index_]
#put the index in column nr. 0
item = QtWidgets.QTableWidgetItem()
item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
item.setData(QtCore.Qt.EditRole,str(index))
self.tableWidget_Info.setItem(rowPosition, 0, item)
#Get the training files of that index
ind = np.where(indices_train==index)[0]
SelectedFiles_train_index = np.array(SelectedFiles_train)[ind]
#Total nr of cells for each class
nr_events = [int(selectedfile["nr_events"]) for selectedfile in SelectedFiles_train_index]
item = QtWidgets.QTableWidgetItem()
item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
item.setData(QtCore.Qt.EditRole, str(np.sum(nr_events)))
self.tableWidget_Info.setItem(rowPosition, 1, item)
nr_events_epoch = [int(selectedfile["nr_events_epoch"]) for selectedfile in SelectedFiles_train_index]
item = QtWidgets.QTableWidgetItem()
item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
item.setData(QtCore.Qt.EditRole, str(np.sum(nr_events_epoch)))
self.tableWidget_Info.setItem(rowPosition, 2, item)
item = QtWidgets.QTableWidgetItem()
if classes_custom_bool==False:
item.setData(QtCore.Qt.EditRole,str(index))
else:
item.setData(QtCore.Qt.EditRole,self.classes_custom[index_])
self.tableWidget_Info.setItem(rowPosition, 3, item)
rowPosition += 1
#Validation info
self.tableWidget_Info.setSpan(rowPosition, 0, 1, 2)
item = QtWidgets.QTableWidgetItem("Val. data")
item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
self.tableWidget_Info.setItem(rowPosition, 0, item)
rowPosition += 1
ind = [selectedfile["TrainOrValid"] == "Valid" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
SelectedFiles_valid = np.array(SelectedFiles)[ind]
SelectedFiles_valid = list(SelectedFiles_valid)
indices_valid = [selectedfile["class"] for selectedfile in SelectedFiles_valid]
#Total nr of cells for each index
for index in np.unique(indices_valid):
#put the index in column nr. 0
item = QtWidgets.QTableWidgetItem()
item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
item.setData(QtCore.Qt.EditRole,str(index))
self.tableWidget_Info.setItem(rowPosition, 0, item)
#Get the validation files of that index
ind = np.where(indices_valid==index)[0]
SelectedFiles_valid_index = np.array(SelectedFiles_valid)[ind]
nr_events = [int(selectedfile["nr_events"]) for selectedfile in SelectedFiles_valid_index]
item = QtWidgets.QTableWidgetItem()
item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
item.setData(QtCore.Qt.EditRole, str(np.sum(nr_events)))
self.tableWidget_Info.setItem(rowPosition, 1, item)
nr_events_epoch = [int(selectedfile["nr_events_epoch"]) for selectedfile in SelectedFiles_valid_index]
item = QtWidgets.QTableWidgetItem()
item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
item.setData(QtCore.Qt.EditRole, str(np.sum(nr_events_epoch)))
self.tableWidget_Info.setItem(rowPosition, 2, item)
rowPosition += 1
self.tableWidget_Info.resizeColumnsToContents()
self.tableWidget_Info.resizeRowsToContents()
def update_data_overview_2(self,SelectedFiles):
if len(SelectedFiles)==0:
#Table2
self.tableWidget_Info_2.setColumnCount(0)
self.tableWidget_Info_2.setRowCount(0)
#In case user specified X_valid and y_valid before, delete it again:
self.ValidationSet = None
self.Metrics = dict() #Also reset the metrics
#Initiate the table with 4 columns: ["Class","Nr of cells","Clr","Name"]
self.tableWidget_Info_2.setColumnCount(4)
header_labels = ["Class","Nr of cells","Clr","Name"]
self.tableWidget_Info_2.setHorizontalHeaderLabels(header_labels)
header = self.tableWidget_Info_2.horizontalHeader()
for i in range(4):
header.setResizeMode(i, QtWidgets.QHeaderView.ResizeToContents)
return
#Prepare a table in tableWidget_Info
self.tableWidget_Info_2.setColumnCount(0)
self.tableWidget_Info_2.setRowCount(0)
#In case user specified X_valid and y_valid before, delete it again:
self.ValidationSet = None
self.Metrics = dict() #Also reset the metrics
indices = [SelectedFiles[i]["class"] for i in range(len(SelectedFiles))]
#Initiate the table with 4 columns: ["Class","Nr of cells","Clr","Name"]
self.tableWidget_Info_2.setColumnCount(4)
nr_ind = len(set(indices)) #each index could occur for train and valid
nr_rows = nr_ind
self.tableWidget_Info_2.setRowCount(nr_rows)
#Which selected file has the most features?
header_labels = ["Class","Nr of cells","Clr","Name"]
self.tableWidget_Info_2.setHorizontalHeaderLabels(header_labels)
header = self.tableWidget_Info_2.horizontalHeader()
for i in range(4):
header.setResizeMode(i, QtWidgets.QHeaderView.ResizeToContents)
rowPosition = 0
#Validation info
ind = [selectedfile["TrainOrValid"] == "Valid" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
SelectedFiles_valid = np.array(SelectedFiles)[ind]
SelectedFiles_valid = list(SelectedFiles_valid)
indices_valid = [selectedfile["class"] for selectedfile in SelectedFiles_valid]
#Total nr of cells for each index
for index in np.unique(indices_valid):
#put the index in column nr. 0
item = QtWidgets.QTableWidgetItem()
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
item.setData(QtCore.Qt.EditRole,str(index))
self.tableWidget_Info_2.setItem(rowPosition, 0, item)
#Get the validation files of that index
ind = np.where(indices_valid==index)[0]
SelectedFiles_valid_index = np.array(SelectedFiles_valid)[ind]
nr_events_epoch = [int(selectedfile["nr_events_epoch"]) for selectedfile in SelectedFiles_valid_index]
item = QtWidgets.QTableWidgetItem()
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
#item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
item.setData(QtCore.Qt.EditRole, str(np.sum(nr_events_epoch)))
self.tableWidget_Info_2.setItem(rowPosition, 1, item)
#Column for color
item = QtWidgets.QTableWidgetItem()
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
#item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
item.setData(QtCore.Qt.EditRole, "")
item.setBackground(QtGui.QColor(self.colorsQt[index]))
self.tableWidget_Info_2.setItem(rowPosition, 2, item)
#Column for User specified name
item = QtWidgets.QTableWidgetItem()
#item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
item.setData(QtCore.Qt.EditRole,str(index))
self.tableWidget_Info_2.setItem(rowPosition, 3, item)
rowPosition += 1
self.tableWidget_Info_2.resizeColumnsToContents()
self.tableWidget_Info_2.resizeRowsToContents()
def tableWidget_Info_2_click(self,item):
if item is not None:
if item.column()==2:
tableitem = self.tableWidget_Info_2.item(item.row(), item.column())
color = QtGui.QColorDialog.getColor()
if color.getRgb()==(0, 0, 0, 255):#no black!
return
else:
tableitem.setBackground(color)
def tableWidget_HistoryItems_dclick(self,item):
if item is not None:
tableitem = self.tableWidget_HistoryItems.item(item.row(), item.column())
if str(tableitem.text())!="Show saved only":
color = QtGui.QColorDialog.getColor()
if color.getRgb()==(0, 0, 0, 255):#no black!
return
else:
tableitem.setBackground(color)
self.update_historyplot()
def select_all(self,col):
"""
Check/Uncheck items on table_dragdrop
"""
apply_at_col = [2,3,8,10]
if col not in apply_at_col:
return
#otherwise continue
rows = range(self.table_dragdrop.rowCount()) #Number of rows of the table
tableitems = [self.table_dragdrop.item(row, col) for row in rows]
checkStates = [tableitem.checkState() for tableitem in tableitems]
#Checked?
checked = [state==QtCore.Qt.Checked for state in checkStates]
if set(checked)=={True}:#all are checked!
#Uncheck all!
for tableitem in tableitems:
tableitem.setCheckState(QtCore.Qt.Unchecked)
else:#otherwise check all
for tableitem in tableitems:
tableitem.setCheckState(QtCore.Qt.Checked)
#If the shuffle column was clicked, do some extra work:
if col==8:
for rowPosition in rows:
if bool(self.table_dragdrop.item(rowPosition, 8).checkState())==False:
rtdc_path = self.table_dragdrop.cellWidget(rowPosition, 0).text()
rtdc_path = str(rtdc_path)
failed,rtdc_ds = aid_bin.load_rtdc(rtdc_path)
if failed:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(rtdc_ds))
msg.setWindowTitle("Error occurred during loading file")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
nr_images = rtdc_ds["events"]["image"].len()
columnPosition = 6
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.DisplayRole, nr_images)
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
self.table_dragdrop.setItem(rowPosition, columnPosition, item)
if bool(self.table_dragdrop.item(rowPosition, 8).checkState())==True:
#If shuffle was unchecked before, the 'Events/Epoch' cell was grayed out; re-enable it now
item = self.table_dragdrop.item(rowPosition, 6)
item.setFlags(item.flags() |QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable )
#Finally, update the Data-Overview-Box
self.dataOverviewOn()#update the overview box
def item_dclick(self, item):
#Check/Uncheck if item is from column 2 or 3
tableitem = self.table_dragdrop.item(item.row(), item.column())
if item.column() in [2,3]:
#If the item is unchecked ->check it!
if tableitem.checkState() == QtCore.Qt.Unchecked:
tableitem.setCheckState(QtCore.Qt.Checked)
#else, the other way around
elif tableitem.checkState() == QtCore.Qt.Checked:
tableitem.setCheckState(QtCore.Qt.Unchecked)
#Show example image if item on column 0 was dclicked
if item.column() == 0:
#rtdc_path = str(item.text())
#rtdc_path = tableitem.text()
rtdc_path = self.table_dragdrop.cellWidget(item.row(), item.column()).text()
failed,rtdc_ds = aid_bin.load_rtdc(rtdc_path)
if failed:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(rtdc_ds))
msg.setWindowTitle("Error occurred during loading file")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
nr_images = rtdc_ds["events"]["image"].len()
ind = np.random.randint(0,nr_images)
img = rtdc_ds["events"]["image"][ind]
if len(img.shape)==2:
height, width = img.shape
channels = 1
elif len(img.shape)==3:
height, width, channels = img.shape
else:
print("Invalid image format: "+str(img.shape))
return
self.w = MyPopup()
self.gridLayout_w = QtWidgets.QGridLayout(self.w)
self.label_image = QtWidgets.QLabel(self.w)
self.label_cropimage = QtWidgets.QLabel(self.w)
#zoom image such that longest side is 512
zoom_factor = np.round(float(512.0/np.max(img.shape)),0)
#Get the order, specified in Image processing->Zoom Order
zoom_order = int(self.comboBox_zoomOrder.currentIndex()) #the combobox-index is already the zoom order
#Convert to corresponding cv2 zooming method
zoom_interpol_method = aid_img.zoom_arguments_scipy2cv(zoom_factor,zoom_order)
img_zoomed = cv2.resize(img, dsize=None,fx=zoom_factor, fy=zoom_factor, interpolation=eval(zoom_interpol_method))
if channels==1:
height, width = img_zoomed.shape
if channels==3:
height, width, _ = img_zoomed.shape
if channels==1:
qi=QtGui.QImage(img_zoomed.data, width, height,width, QtGui.QImage.Format_Indexed8)
if channels==3:
qi = QtGui.QImage(img_zoomed.data,img_zoomed.shape[1], img_zoomed.shape[0], QtGui.QImage.Format_RGB888)
self.label_image.setPixmap(QtGui.QPixmap.fromImage(qi))
self.gridLayout_w.addWidget(self.label_image, 1,1)
#get the location of the cell
rowPosition = item.row()
pix = float(self.table_dragdrop.item(rowPosition, 7).text())
#pix = rtdc_ds.config["imaging"]["pixel size"]
PIX = pix
pos_x,pos_y = rtdc_ds["events"]["pos_x"][ind]/PIX,rtdc_ds["events"]["pos_y"][ind]/PIX
cropsize = self.spinBox_imagecrop.value()
y1 = int(round(pos_y))-cropsize/2
x1 = int(round(pos_x))-cropsize/2
y2 = y1+cropsize
x2 = x1+cropsize
#Crop the image
img_crop = img[int(y1):int(y2),int(x1):int(x2)]
#zoom image such that the height gets the same as for non-cropped img
zoom_factor = float(img_zoomed.shape[0])/img_crop.shape[0]
if zoom_factor == np.inf:
zoom_factor = 1
if self.actionVerbose.isChecked()==True:
print("Zoom factor was infinite; set it to 1")
#Get the order, specified in Image processing->Zoom Order
zoom_order = int(self.comboBox_zoomOrder.currentIndex()) #the combobox-index is already the zoom order
zoom_interpol_method = aid_img.zoom_arguments_scipy2cv(zoom_factor,zoom_order)
img_crop = cv2.resize(img_crop, dsize=None,fx=zoom_factor, fy=zoom_factor, interpolation=eval(zoom_interpol_method))
if channels==1:
height, width = img_crop.shape
qi=QtGui.QImage(img_crop.data, width, height,width, QtGui.QImage.Format_Indexed8)
if channels==3:
height, width, _ = img_crop.shape
qi = QtGui.QImage(img_crop.data,width, height, QtGui.QImage.Format_RGB888)
self.label_cropimage.setPixmap(QtGui.QPixmap.fromImage(qi))
self.gridLayout_w.addWidget(self.label_cropimage, 1,2)
self.w.show()
def get_norm_from_modelparafile(self):
#Get the normalization method from a modelparafile
filename = QtWidgets.QFileDialog.getOpenFileName(self, 'Open meta-data', Default_dict["Path of last model"],"AIDeveloper Meta file (*meta.xlsx)")
filename = filename[0]
if len(str(filename))==0:
return
norm = pd.read_excel(filename,sheet_name='Parameters')["Normalization"]
norm = str(norm[0])
index = self.comboBox_Normalization.findText(norm, QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_Normalization.setCurrentIndex(index)
self.w.close()
else:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Invalid normalization method was specified.\
Likely this version of AIDeveloper does not support that normalization method\
Please define a valid normalization method")
msg.setDetailedText("Supported normalization methods are: "+"\n".join(self.norm_methods))
msg.setWindowTitle("Invalid Normalization method")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#raise ValueError("Invalid Normalization method")
def update_plottingTab(self):
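#Load the .rtdc file currently selected in comboBox_chooseRtdcFile and sort its
#features into scalar (0d), per-event arrays/traces (1d), and images/masks (2d)
#to populate the comboboxes and option popups of the plotting tab.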
#Get current text of combobox (url to data set)
url = str(self.comboBox_chooseRtdcFile.currentText())
if len(url)==0:
return
failed,rtdc_ds = aid_bin.load_rtdc(url)
if failed:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(rtdc_ds))
msg.setWindowTitle("Error occurred during loading file")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
keys = list(rtdc_ds["events"].keys())
#find keys of image_channels
keys_0d,keys_1d,keys_2d = [],[],[]
for key in keys:
if type(rtdc_ds["events"][key])==h5py._hl.dataset.Dataset:
shape = rtdc_ds["events"][key].shape
if len(shape)==1: #zero-dimensional info (single number per cell)
keys_0d.append(key)
elif len(shape)==2: #one-dimensional info (multiple numbers per cell)
keys_1d.append(key)
elif len(shape)==3: #two-dimensional info (images)
keys_2d.append(key)
#add the traces to the 1d features
if "trace" in keys:
for key_trace in list(rtdc_ds["events"]["trace"].keys()):
keys_1d.append(key_trace+" (RTFDC)")
#Sort keys_2d: "image" first; "mask" last
if "image" in keys_2d: keys_2d.insert(0, keys_2d.pop(keys_2d.index("image")))
if "mask" in keys_2d: keys_2d.insert(len(keys_2d), keys_2d.pop(keys_2d.index("mask")))
#Fill those features in the comboboxes at the scatterplot
self.comboBox_featurex.addItems(keys_0d)
self.comboBox_featurey.addItems(keys_0d)
#check if masks or contours are available
cont_available = "mask" in keys or "contour" in keys
self.checkBox_contour.setEnabled(cont_available)
self.checkBox_contour.setChecked(cont_available)
#Centroid is always available (prerequisite for AIDeveloper)
self.checkBox_centroid.setEnabled(True)
self.checkBox_centroid.setChecked(True)
#Initialize option menus
self.contour_options_nr = 0
self.centroid_options_nr = 0
self.show_1d_options_nr = 0
self.show_2d_options_nr = 0
self.init_contour_options(keys_2d)
self.init_centroid_options(keys_1d)
self.init_2d_options(keys_2d)
self.init_1d_options(keys_1d)
def init_contour_options(self,keys_2d):
print("Work in progress")
# self.popup_layercontrols = MyPopup()
# self.popup_layercontrols_ui = frontend.Ui_LayerControl()
# self.popup_layercontrols_ui.setupUi(self.popup_layercontrols,keys_2d) #open a popup
def init_centroid_options(self,keys_image):
print("Work in progress")
# self.popup_centroid_options = MyPopup()
# self.popup_centroid_options_ui = aid_frontend.Ui_centroid_options()
# self.popup_centroid_options_ui.setupUi(self.popup_centroid_options,keys_image) #open a popup
def init_2d_options(self,keys_2d):
#Initialize 2d Option Menu. Range values are saved and manipulated here
self.popup_2dOptions = MyPopup()
self.popup_2dOptions_ui = aid_frontend.Ui_2dOptions()
self.popup_2dOptions_ui.setupUi(self.popup_2dOptions,keys_2d) #open a popup
def init_1d_options(self,keys_1d):
self.popup_1dOptions = MyPopup()
self.popup_1dOptions_ui = aid_frontend.Ui_1dOptions()
self.popup_1dOptions_ui.setupUi(self.popup_1dOptions,keys_1d) #open a popup
def show_contour_options(self):
self.contour_options_nr += 1
print("Work in progress")
def show_centroid_options(self):
print("Work in progress")
self.centroid_options_nr += 1
#self.popup_layercontrols_ui.pushButton_close.clicked.connect(self.visualization_settings)
if self.centroid_options_nr==1:
for iterator in range(len(self.popup_layercontrols_ui.spinBox_minChX)):
print(1)
def show_2d_options(self):
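#On the first call, connect the range sliders, visibility/auto-range checkboxes and
#colormap comboboxes of each image layer to put_image, so the displayed cell updates
#immediately when a setting changes; then show the popup.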
self.show_2d_options_nr += 1
#self.popup_layercontrols_ui.pushButton_close.clicked.connect(self.visualization_settings)
if self.show_2d_options_nr==1:
for iterator in range(len(self.popup_2dOptions_ui.spinBox_minChX)):
slider = self.popup_2dOptions_ui.horizontalSlider_chX[iterator]
slider.startValueChanged.connect(lambda _, b=None: self.put_image(ind=b))
slider.endValueChanged.connect(lambda _, b=None: self.put_image(ind=b))
checkBox = self.popup_2dOptions_ui.checkBox_show_chX[iterator]
checkBox.stateChanged.connect(lambda _, b=None: self.put_image(ind=b))
comboBox = self.popup_2dOptions_ui.comboBox_cmap_chX[iterator]
comboBox.currentIndexChanged.connect(lambda _, b=None: self.put_image(ind=b))
checkBox = self.popup_2dOptions_ui.checkBox_auto_chX[iterator]
checkBox.stateChanged.connect(lambda _, b=None: self.put_image(ind=b))
self.popup_2dOptions.show()
def show_1d_options(self):
self.show_1d_options_nr += 1
#self.popup_layercontrols_ui.pushButton_close.clicked.connect(self.visualization_settings)
if self.show_1d_options_nr==1:
for iterator in range(len(self.popup_1dOptions_ui.checkBox_show_chX)):
checkBox = self.popup_1dOptions_ui.checkBox_show_chX[iterator]
checkBox.stateChanged.connect(lambda _, b=None: self.put_line(index=b))
comboBox = self.popup_1dOptions_ui.comboBox_cmap_chX[iterator]
comboBox.clicked.connect(lambda _, b=None: self.put_line(index=b))
self.popup_1dOptions.show()
def activate_deactivate_spinbox(self,newstate):
#get the checkstate of the Input model crop
if newstate==2:
#activate the spinbox
self.spinBox_imagecrop.setEnabled(True)
elif newstate==0:
self.spinBox_imagecrop.setEnabled(False)
def gray_or_rgb_augmentation(self,index):
#When Color-Mode is changed:
#Get the new colormode:
new_colormode = self.colorModes[index]
#when the new Color Mode is Grayscale, disable saturation and hue augmentation
if new_colormode=="Grayscale":
self.checkBox_contrast.setEnabled(True)
self.checkBox_contrast.setChecked(True)
self.doubleSpinBox_contrastLower.setEnabled(True)
self.doubleSpinBox_contrastHigher.setEnabled(True)
self.checkBox_saturation.setEnabled(False)
self.checkBox_saturation.setChecked(False)
self.doubleSpinBox_saturationLower.setEnabled(False)
self.doubleSpinBox_saturationHigher.setEnabled(False)
self.checkBox_hue.setEnabled(False)
self.checkBox_hue.setChecked(False)
self.doubleSpinBox_hueDelta.setEnabled(False)
elif new_colormode=="RGB":
self.checkBox_contrast.setEnabled(True)
self.checkBox_contrast.setChecked(True)
self.doubleSpinBox_contrastLower.setEnabled(True)
self.doubleSpinBox_contrastHigher.setEnabled(True)
self.checkBox_saturation.setEnabled(True)
self.checkBox_saturation.setChecked(True)
self.doubleSpinBox_saturationLower.setEnabled(True)
self.doubleSpinBox_saturationHigher.setEnabled(True)
self.checkBox_hue.setEnabled(True)
self.checkBox_hue.setChecked(True)
self.doubleSpinBox_hueDelta.setEnabled(True)
else:
print("Invalid Color Mode")
def onClick(self,points,pointermethod):
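#Select a single event, either from a clicked scatter point (pointermethod=="point")
#or from a given integer index (pointermethod=="index"): highlight it in the scatter
#plot, sync spinbox/slider without triggering their handlers, and update image and trace.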
#delete the last item if the user selected already one:
try:
self.scatter_xy.removeItem(self.point_clicked)
except:
pass
if pointermethod=="point":
points = points[0]
p = points.pos()
clicked_x, clicked_y = p.x(), p.y()
a1 = (clicked_x)/float(np.max(self.feature_x))
a2 = (clicked_y)/float(np.max(self.feature_y))
#Which is the closest scatter point?
dist = np.sqrt(( a1-self.scatter_x_norm )**2 + ( a2-self.scatter_y_norm )**2)
index = np.argmin(dist)
elif pointermethod=="index":
index = points
clicked_x = self.feature_x[index]
clicked_y = self.feature_y[index]
self.point_clicked = pg.ScatterPlotItem()
self.point_clicked.setData([clicked_x], [clicked_y],brush="r",symbol='o',symbolPen="w",size=15)
self.scatter_xy.addItem(self.point_clicked)
#self.scatter_xy.plot([clicked_x], [clicked_y],pen=None,symbol='o',symbolPen='w',clear=False)
self.point_was_selected_before = True
#Whether the user clicked a point or used the slider: always adjust spinbox and slider without running the onChange functions
self.changedbyuser = False
self.spinBox_cellInd.setValue(index)
self.horizontalSlider_cellInd.setValue(index)
self.changedbyuser = True
self.put_image(index)
self.put_line(index)
def put_image(self,ind):
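#Display the image of the event with index 'ind' (or the spinbox value if ind is None)
#in widget_showCell. In "Grayscale" mode the layers are kept as individual grayscale
#frames; in "RGB" mode the active layers are contrast-clipped and averaged into the
#red/green/blue channels according to the 2d-options popup. Optionally overlay the
#centroid and the contour of the cell.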
#check that the user is looking at the plotting tab
curr_ind = self.tabWidget_Modelbuilder.currentIndex()
if curr_ind!=3:
return
try:
self.widget_showCell.removeItem(self.dot)
except:
pass
try:
self.widget_showCell.removeItem(self.plot_contour)
except:
pass
if ind is None:
index = int(self.spinBox_cellInd.value())
else:
index = ind
rtdc_ds = self.rtdc_ds
#which channels should be displayed
channels = len(self.popup_2dOptions_ui.spinBox_minChX)
keys_2d = [self.popup_2dOptions_ui.label_layername_chX[i].text() for i in range(channels)]
#Assemble the image array that carries all image information
if channels==1:
img = np.expand_dims(rtdc_ds["events"]["image"][index],-1)
elif channels>1:
img = np.stack( [rtdc_ds["events"][key][index] for key in keys_2d] ,axis=-1)
if len(img.shape)==2:
channels = 1
elif len(img.shape)==3:
height, width, channels = img.shape
else:
print("Invalid image format: "+str(img.shape))
return
color_mode = str(self.comboBox_GrayOrRGB_2.currentText())
if color_mode=="Grayscale": #Slider allows to show individual layers: each is shown as grayscale
pass #keep the layers as they are; each is shown as an individual grayscale frame
elif color_mode == "RGB":#User can define, which layers are shown in R,G,and B
#Retrieve the setting from self.popup_layercontrols_ui
ui_item = self.popup_2dOptions_ui
layer_names = [obj.text() for obj in ui_item.label_layername_chX]
layer_active = [obj.isChecked() for obj in ui_item.checkBox_show_chX]
layer_range = [obj.getRange() for obj in ui_item.horizontalSlider_chX]
layer_auto = [obj.isChecked() for obj in ui_item.checkBox_auto_chX]
layer_cmap = [obj.currentText() for obj in ui_item.comboBox_cmap_chX]
#Assemble the image according to the settings in self.popup_layercontrols_ui
#Find activated layers for each color:
ind_active_r,ind_active_g,ind_active_b = [],[],[]
for ch in range(len(layer_cmap)):
#for color,active in zip(layer_cmap,layer_active):
if layer_cmap[ch]=="Red" and layer_active[ch]==True:
ind_active_r.append(ch)
if layer_cmap[ch]=="Green" and layer_active[ch]==True:
ind_active_g.append(ch)
if layer_cmap[ch]=="Blue" and layer_active[ch]==True:
ind_active_b.append(ch)
if len(ind_active_r)>0:
img_ch = img[:,:,np.array(ind_active_r)]
layer_range_ch = np.array(layer_range)[np.array(ind_active_r)] #Range of all red channels
layer_auto_ch = np.array(layer_auto)[np.array(ind_active_r)] #Automatic range
#Scale each red channel according to layer_range
for layer in range(img_ch.shape[-1]):
limits,auto = layer_range_ch[layer],layer_auto_ch[layer]
img_ch[:,:,layer] = aid_img.clip_contrast(img=img_ch[:,:,layer],low=limits[0],high=limits[1],auto=auto)
img_r = np.mean(img_ch,axis=-1).astype(np.uint8)
else:
img_r = np.zeros(shape=(img.shape[0],img.shape[1]),dtype=np.uint8)
if len(ind_active_g)>0:
img_ch = img[:,:,np.array(ind_active_g)]
layer_range_ch = np.array(layer_range)[np.array(ind_active_g)] #Range of all green channels
layer_auto_ch = np.array(layer_auto)[np.array(ind_active_g)] #Automatic range
#Scale each green channel according to layer_range
for layer in range(img_ch.shape[-1]):
limits,auto = layer_range_ch[layer],layer_auto_ch[layer]
img_ch[:,:,layer] = aid_img.clip_contrast(img=img_ch[:,:,layer],low=limits[0],high=limits[1],auto=auto)
img_g = np.mean(img_ch,axis=-1).astype(np.uint8)
else:
img_g = np.zeros(shape=(img.shape[0],img.shape[1]),dtype=np.uint8)
if len(ind_active_b)>0:
img_ch = img[:,:,np.array(ind_active_b)]
layer_range_ch = np.array(layer_range)[np.array(ind_active_b)] #Range of all blue channels
layer_auto_ch = np.array(layer_auto)[np.array(ind_active_b)] #Automatic range
#Scale each blue channel according to layer_range
for layer in range(img_ch.shape[-1]):
limits,auto = layer_range_ch[layer],layer_auto_ch[layer]
img_ch[:,:,layer] = aid_img.clip_contrast(img=img_ch[:,:,layer],low=limits[0],high=limits[1],auto=auto)
img_b = np.mean(img_ch,axis=-1).astype(np.uint8)
else:
img_b = np.zeros(shape=(img.shape[0],img.shape[1]),dtype=np.uint8)
#Assemble image by stacking all layers
img = np.stack([img_r,img_g,img_b],axis=-1)
#Get the levels of the previous frame
levels_init = self.widget_showCell.getLevels()
if levels_init==(0,1.0):
levels_init = (0,255)
#Get the layer index of the previous frame
index_ = self.widget_showCell.currentIndex
if color_mode=="Grayscale":
self.widget_showCell.setImage(img.T,autoRange=False,levels=levels_init,levelMode="mono")
self.widget_showCell.setCurrentIndex(index_)
elif color_mode=="RGB":
self.widget_showCell.setImage(np.swapaxes(img,0,1))
pix = rtdc_ds.attrs["imaging:pixel size"]
pos_x = rtdc_ds["events"]["pos_x"][index]/pix
pos_y = rtdc_ds["events"]["pos_y"][index]/pix
#Indicate the centroid of the cell
if self.checkBox_centroid.isChecked():
self.dot = pg.CircleROI(pos=(pos_x-2, pos_y-2), size=4, pen=QtGui.QPen(QtCore.Qt.red, 0.1), movable=False)
self.widget_showCell.getView().addItem(self.dot)
self.widget_showCell.show()
if self.checkBox_contour.isChecked():
#get the contour based on the mask
contour,_ = cv2.findContours(rtdc_ds["events"]["mask"][index], cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
contour = contour[0][:,0,:]
self.plot_contour = pg.PlotCurveItem(contour[:,0],contour[:,1],width=6,pen="r")
self.widget_showCell.getView().addItem(self.plot_contour)
def put_line(self,index):
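#Plot the 1d features (e.g. fluorescence traces) of the selected event that are
#enabled in the 1d-options popup, using the colors chosen there.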
curr_ind = self.tabWidget_Modelbuilder.currentIndex()
if curr_ind!=3:
return
#Fluorescence traces: clear first
try:
self.plot_fl_trace_.clear() #clear the plot
self.plot_fl_trace.clear() #clear the plot
except:
pass
if index is None:
index = int(self.spinBox_cellInd.value())
rtdc_ds = self.rtdc_ds
feature_keys = list(rtdc_ds.keys())
#which features should be displayed
features_nr = len(self.popup_1dOptions_ui.checkBox_show_chX)
keys_1d = [self.popup_1dOptions_ui.checkBox_show_chX[i].text() for i in range(features_nr)]
keys_1d_on = [self.popup_1dOptions_ui.checkBox_show_chX[i].isChecked() for i in range(features_nr)]
colors = [self.popup_1dOptions_ui.comboBox_cmap_chX[i].palette().button().color() for i in range(features_nr)]
colors = [list(c.getRgb()) for c in colors]
colors = [tuple(c) for c in colors]
ind = np.where(np.array(keys_1d_on)==True)[0]
keys_1d = list(np.array(keys_1d)[ind])
colors = list(np.array(colors)[ind])
for key_1d,color in zip(keys_1d,colors):
if key_1d.endswith(" (RTFDC)"):
key_1d = key_1d.split(" (RTFDC)")[0]
trace_flx = rtdc_ds["events"]["trace"][key_1d][index]
pencolor = pg.mkPen(color, width=2)
self.plot_fl_trace_ = self.plot_fl_trace.plot(range(len(trace_flx)),trace_flx,width=6,pen=pencolor,clear=False)
# if "fl1_max" in feature_keys and "fl1_pos" in feature_keys: #if also the maxima and position of the max are available: use it to put the region accordingly
# fl1_max,fl1_pos = rtdc_ds["events"]["fl1_max"][index],rtdc_ds["events"]["fl1_pos"][index]
else:
values = rtdc_ds["events"][key_1d][index]
pencolor = pg.mkPen(color, width=2)
self.plot_fl_trace_ = self.plot_fl_trace.plot(range(len(values)),values,width=6,pen=pencolor,clear=False)
#get the maximum of [fl1_max,fl2_max,fl3_max] and put the region to the corresponding fl-position
# ind = np.argmax(np.array([fl1_max,fl2_max,fl3_max]))
# region_pos = np.array([fl1_pos,fl2_pos,fl3_pos])[ind] #this region is already given in us. translate this back to range
# peak_height = np.array([fl1_max,fl2_max,fl3_max])[ind]
# sample_rate = rtdc_ds.attrs["fluorescence:sample rate"]
# fl_pos_ind = float((sample_rate*region_pos))/1E6 #
# #Indicate the used flx_max and flx_pos by a scatter dot
# self.peak_dot = self.plot_fl_trace.plot([float(fl_pos_ind)], [float(peak_height)],pen=None,symbol='o',symbolPen='w',clear=False)
def onScatterClick(self,event, points):
pointermethod = 'point'
if self.changedbyuser:
self.onClick(points,pointermethod)
def onIndexChange(self,index):
pointermethod = 'index'
if self.changedbyuser:
self.onClick(index,pointermethod)
#Set self.changedbyuser to False and change the spinbox and slider. changedbyuser=False prevents onClick function
self.changedbyuser = False
self.spinBox_cellInd.setValue(index)
self.horizontalSlider_cellInd.setValue(index)
self.changedbyuser = True
def updateScatterPlot(self):
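#Rebuild the scatter plot: load the selected file, read the chosen x/y features,
#optionally color the points by a kernel density estimate (2d histogram or Gauss),
#and refresh the marginal histograms and the cell-index slider/spinbox limits.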
#If the Plot is updated, delete the dot in the cell-image
try:
self.widget_showCell.removeItem(self.dot)
except:
pass
try:
self.scatter_xy.removeItem(self.point_clicked)
except:
pass
self.point_was_selected_before = False
#read url from current comboBox_chooseRtdcFile
url = str(self.comboBox_chooseRtdcFile.currentText())
if len(url)==0:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Please use the 'Build' tab to load files first")
msg.setWindowTitle("No file selected")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
failed,rtdc_ds = aid_bin.load_rtdc(url)
if failed:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(rtdc_ds))
msg.setWindowTitle("Error occurred during loading file")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
self.rtdc_ds = rtdc_ds
feature_x_name = str(self.comboBox_featurex.currentText())
feature_y_name = str(self.comboBox_featurey.currentText())
features = list(self.rtdc_ds["events"].keys())
if feature_x_name in features:
self.feature_x = self.rtdc_ds["events"][feature_x_name]
else:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Feature on x axis is not contained in data set")
msg.setWindowTitle("Invalid x feature")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
if feature_y_name in features:
self.feature_y = self.rtdc_ds["events"][feature_y_name]
else:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Feature on y axis is not contained in data set")
msg.setWindowTitle("Invalid y feature")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
self.changedbyuser = True #variable used to prevent plotting if spinbox or slider is changed programmatically
#density estimation
kde = self.comboBox_kde.currentText()
if kde=="None":
brush = "b"
elif kde=="2d Histogram" or kde=="Gauss":
if kde=="2d Histogram":
density = aid_bin.kde_histogram(np.array(self.feature_x), np.array(self.feature_y))
elif kde=="Gauss":
density = aid_bin.kde_gauss(np.array(self.feature_x), np.array(self.feature_y))
density_min,density_max = np.min(density),np.max(density)
density = (density-density_min)/density_max
# define colormap
brush = []
from pyqtgraph.graphicsItems.GradientEditorItem import Gradients
cmap = pg.ColorMap(*zip(*Gradients["viridis"]["ticks"]))
for k in density:
brush.append(cmap.mapToQColor(k))
#Add plot
#self.scatter = self.scatter_xy.plot(np.array(self.feature_x), np.array(self.feature_y),symbolPen=None,pen=None,symbol='o',brush=brush[100],clear=True)
#try to remove existing scatterplot
try:
self.scatter_xy.removeItem(self.scatter)
except:
print("Not cleared")
self.scatter = pg.ScatterPlotItem()
self.scatter.setData(np.array(self.feature_x), np.array(self.feature_y),brush=brush,symbolPen=None,pen=None,symbol='o',size=10)
self.scatter_xy.addItem(self.scatter)
#pen=None,symbol='o',symbolPen=None,symbolBrush=density,clear=True)
self.scatter.sigClicked.connect(self.onScatterClick) #When scatterplot is clicked, show the desired cell
#Fill histogram for x-axis; widget_histx
y,x = np.histogram(self.feature_x, bins='auto')
self.hist_x.plot(x, y, stepMode=True, fillLevel=0, brush=(0,0,255,150),clear=True)
#Manually clear y hist first. Only clear=True did not do the job
self.hist_y.clear()
#Fill histogram for y-axis; widget_histy
y,x = np.histogram(self.feature_y, bins='auto')
curve = pg.PlotCurveItem(-1.*x, y, stepMode=True, fillLevel=0, brush=(0, 0, 255, 150),clear=True)
curve.rotate(-90)
self.hist_y.addItem(curve)
self.scatter_x_norm = (np.array(self.feature_x).astype(np.float32))/float(np.max(self.feature_x))
self.scatter_y_norm = (np.array(self.feature_y).astype(np.float32))/float(np.max(self.feature_y))
#Adjust the horizontalSlider_cellInd and spinBox_cellInd
self.horizontalSlider_cellInd.setSingleStep(1)
self.horizontalSlider_cellInd.setMinimum(0)
self.horizontalSlider_cellInd.setMaximum(len(self.feature_x)-1)
self.spinBox_cellInd.setMinimum(0)
self.spinBox_cellInd.setMaximum(len(self.feature_x)-1)
def selectPeakPos(self):
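#Append the manually defined peak (fl_max, fl_pos in us, pos_x in um, plus the raw
#index/pixel values) as a new row of tableWidget_showSelectedPeaks and refit the model.
#Requires that a region and a peak were defined before.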
#Check if self.region exists
#If not, show a message and return:
if not hasattr(self, 'region'):
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("There is no region defined yet")
msg.setWindowTitle("No region defined")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#Try to get the user defined peak position
if not hasattr(self, 'new_peak'):
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("There is no peak defined yet")
msg.setWindowTitle("No peak defined")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#how many rows are already in the table?
rowcount = self.tableWidget_showSelectedPeaks.rowCount()
self.tableWidget_showSelectedPeaks.setRowCount(rowcount+1)
rowPosition = rowcount
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(self.new_peak["fl_max"]))
self.tableWidget_showSelectedPeaks.setItem(rowPosition, 0, item)
item = QtWidgets.QTableWidgetItem()
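#fl_pos is a trace index; index*1E6/sample_rate converts it to microseconds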
fl_pos_us = float(float(self.new_peak["fl_pos"])*float(1E6))/float(self.rtdc_ds.attrs["fluorescence:sample rate"])
item.setData(QtCore.Qt.EditRole,fl_pos_us)
self.tableWidget_showSelectedPeaks.setItem(rowPosition, 1, item)
item = QtWidgets.QTableWidgetItem()
pos_x_um = float(self.new_peak["pos_x"])*float(self.rtdc_ds.attrs["imaging:pixel size"])
item.setData(QtCore.Qt.EditRole,pos_x_um)
self.tableWidget_showSelectedPeaks.setItem(rowPosition, 2, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(self.new_peak["fl_pos"]))
self.tableWidget_showSelectedPeaks.setItem(rowPosition, 3, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(self.new_peak["pos_x"]))
self.tableWidget_showSelectedPeaks.setItem(rowPosition, 4, item)
self.tableWidget_showSelectedPeaks.resizeColumnsToContents()
self.tableWidget_showSelectedPeaks.resizeRowsToContents()
#Update the widget_showSelectedPeaks
self.update_peak_plot()
def selectPeakRange(self):
new_region = self.region.getRegion()
region_width = np.max(new_region) - np.min(new_region) #in [samples]
sample_rate = self.rtdc_ds.attrs["fluorescence:sample rate"]
region_width = (float(region_width)/float(sample_rate))*1E6 #range[samples]/sample_rate[1/s] = range[s]; multiply by 1E6 to convert to us
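#Example (assumed values): a region of 100 samples at a sample rate of 1 MHz corresponds to (100/1E6)*1E6 = 100 us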
self.region_width = region_width
#put this in the table
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, "Range [us]")
self.tableWidget_peakModelParameters.setItem(1, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(self.region_width))
self.tableWidget_peakModelParameters.setItem(1, 1, item)
item = QtWidgets.QTableWidgetItem()
def onPeaksPlotClick(self,event, points):
points = points[0]
p = points.pos()
clicked_x, clicked_y = p.x(), p.y()
a1 = (clicked_x)/float(np.max(self.Pos_x))
a2 = (clicked_y)/float(np.max(self.Fl_pos))
#Which is the closest scatter point?
pos_x_norm = self.Pos_x/np.max(self.Pos_x)#normalized pos_x
fl_pos_norm = self.Fl_pos/np.max(self.Fl_pos)#normalized fl_pos
dist = np.sqrt(( a1-pos_x_norm )**2 + ( a2-fl_pos_norm )**2)
index = np.argmin(dist)
#Highlight this row
self.tableWidget_showSelectedPeaks.selectRow(index)
#Delete the highlighted rows
# try:
# self.actionRemoveSelectedPeaks_function()
# except:
# pass
def update_peak_plot(self):
#This function reads tableWidget_showSelectedPeaks and
#fits a function and
#puts fitting parameters on tableWidget_peakModelParameters
#read the data on tableWidget_showSelectedPeaks
rowcount = self.tableWidget_showSelectedPeaks.rowCount()
Fl_pos,Pos_x = [],[]
for row in range(rowcount):
line = [float(self.tableWidget_showSelectedPeaks.item(row, col).text()) for col in [1,2]] #use the values for [us] and [um]
Fl_pos.append(line[0])
Pos_x.append(line[1])
self.Fl_pos = np.array(Fl_pos)
self.Pos_x = np.array(Pos_x)
self.selectedPeaksPlotPlot = self.selectedPeaksPlot.plot(self.Pos_x, self.Fl_pos,pen=None,symbol='o',symbolPen=None,symbolBrush='b',clear=True)
#if user clicks in the plot, show him the corresponding row in the table
self.selectedPeaksPlotPlot.sigPointsClicked.connect(self.onPeaksPlotClick)
if not hasattr(self, 'region_width'): #if there was no region_width defined yet...
#to get a reasonable initial range, use 20% of the nr. of available samples
samples_per_event = self.rtdc_ds.attrs["fluorescence:samples per event"]
self.region_width = 0.2*samples_per_event #width of the region in samples
#Convert to SI unit:
sample_rate = self.rtdc_ds.attrs["fluorescence:sample rate"]
self.region_width = (float(self.region_width)/float(sample_rate))*1E6 #range[samples]/sample_rate[1/s] = range[s]; multiply by 1E6 to convert to us
#which model should be used?
if str(self.comboBox_peakDetModel.currentText()) == "Linear dependency and max in range" and len(Pos_x)>1:
slope,intercept = np.polyfit(Pos_x, Fl_pos,deg=1) #Linear FIT, y=mx+n; y=FL_pos[us] x=Pos_x[um]
xlin = np.round(np.linspace(np.min(Pos_x),np.max(Pos_x),25),1)
ylin = intercept + slope*xlin
self.selectedPeaksPlot.plot(xlin, ylin,width=6,pen='b',clear=False)
#Put info to tableWidget_peakModelParameters
self.tableWidget_peakModelParameters.setColumnCount(2)
self.tableWidget_peakModelParameters.setRowCount(5)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, "Model")
self.tableWidget_peakModelParameters.setItem(0, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, "Linear dependency and max in range")
self.tableWidget_peakModelParameters.setItem(0, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, "Range [us]")
self.tableWidget_peakModelParameters.setItem(1, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(self.region_width))
self.tableWidget_peakModelParameters.setItem(1, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, "Intercept [us]")
self.tableWidget_peakModelParameters.setItem(2, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(intercept))
self.tableWidget_peakModelParameters.setItem(2, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, "Slope [us/um]")
self.tableWidget_peakModelParameters.setItem(3, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(slope))
self.tableWidget_peakModelParameters.setItem(3, 1, item)
#Calculate velocity
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, "Velocity[m/s]")
self.tableWidget_peakModelParameters.setItem(4, 0, item)
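#slope has unit us/um, so 1/slope is um/us, which equals m/s
#Example (assumed value): slope = 0.05 us/um -> velocity = 1/0.05 = 20 m/s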
velocity = float(1.0/float(slope))
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(velocity))
self.tableWidget_peakModelParameters.setItem(4, 1, item)
def addHighestXPctPeaks(self):
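#For every enabled fluorescence channel (fl1/fl2/fl3), find the maximum of each trace,
#keep only the brightest x% of events, and add them to tableWidget_showSelectedPeaks.
#If no dataset is loaded yet, updateScatterPlot() is called first.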
#how many x%?
x_pct = float(self.doubleSpinBox_highestXPercent.value())
#Get the fluorescence traces and maxima/positions of maxima
#->it could be that the user did not yet load the dataset:
if not hasattr(self,"rtdc_ds"):
#run the function updateScatterPlot()
self.updateScatterPlot()
trace = self.rtdc_ds["events"]["trace"]
fl_keys = list(trace.keys())
fl1_max,fl1_pos,fl2_max,fl2_pos,fl3_max,fl3_pos,pos_x = [],[],[],[],[],[],[]
for i in range(len(fl_keys)):
if "fl1_median" in fl_keys[i] and self.checkBox_fl1.isChecked():
for index in range(len(trace[fl_keys[i]])):
trace_flx = trace[fl_keys[i]][index]
ind = np.argmax(trace_flx)
fl1_max.append(trace_flx[ind])
fl1_pos.append(ind)
#Get the x% maxima
fl1_max = np.array(fl1_max)
fl1_pos = np.array(fl1_pos)
sorter = np.argsort(fl1_max)[::-1]
sorter = sorter[0:int(x_pct/100.0*len(fl1_max))]
fl1_max = fl1_max[sorter]
fl1_pos = fl1_pos[sorter]
pos_x.append(self.rtdc_ds["events"]["pos_x"][sorter])
elif "fl2_median" in fl_keys[i] and self.checkBox_fl2.isChecked():
for index in range(len(trace[fl_keys[i]])):
trace_flx = trace[fl_keys[i]][index]
ind = np.argmax(trace_flx)
fl2_max.append(trace_flx[ind])
fl2_pos.append(ind)
#Get the x% maxima
fl2_max = np.array(fl2_max)
fl2_pos = np.array(fl2_pos)
sorter = np.argsort(fl2_max)[::-1]
sorter = sorter[0:int(x_pct/100.0*len(fl2_max))]
fl2_max = fl2_max[sorter]
fl2_pos = fl2_pos[sorter]
pos_x.append(self.rtdc_ds["events"]["pos_x"][sorter])
elif "fl3_median" in fl_keys[i] and self.checkBox_fl3.isChecked():
for index in range(len(trace[fl_keys[i]])):
trace_flx = trace[fl_keys[i]][index]
ind = np.argmax(trace_flx)
fl3_max.append(trace_flx[ind])
fl3_pos.append(ind)
#Get the x% maxima
fl3_max = np.array(fl3_max)
fl3_pos = np.array(fl3_pos)
sorter = np.argsort(fl3_max)[::-1]
sorter = sorter[0:int(x_pct/100.0*len(fl3_max))]
fl3_max = fl3_max[sorter]
fl3_pos = fl3_pos[sorter]
pos_x.append(self.rtdc_ds["events"]["pos_x"][sorter])
#Add fl1 fl2 and fl3 information
flx_max = np.array(list(fl1_max)+list(fl2_max)+list(fl3_max))
flx_pos = np.array(list(fl1_pos)+list(fl2_pos)+list(fl3_pos))
pos_x_um = np.concatenate(np.atleast_2d(np.array(pos_x)))
pix = self.rtdc_ds.attrs["imaging:pixel size"]
pos_x = pos_x_um/pix #convert from um to pix
rowcount = self.tableWidget_showSelectedPeaks.rowCount()
self.tableWidget_showSelectedPeaks.setRowCount(rowcount+len(flx_max))
for i in range(len(flx_max)):
rowPosition = rowcount+i
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(flx_max[i]))
self.tableWidget_showSelectedPeaks.setItem(rowPosition, 0, item)
item = QtWidgets.QTableWidgetItem()
fl_pos_us = float(float(flx_pos[i])*float(1E6))/float(self.rtdc_ds.attrs["fluorescence:sample rate"] )
item.setData(QtCore.Qt.EditRole,fl_pos_us)
self.tableWidget_showSelectedPeaks.setItem(rowPosition, 1, item)
item = QtWidgets.QTableWidgetItem()
#pos_x_um = float(pos_x[i])*float(self.rtdc_ds.config["imaging"]["pixel size"])
item.setData(QtCore.Qt.EditRole,float(pos_x_um[i]))
self.tableWidget_showSelectedPeaks.setItem(rowPosition, 2, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(flx_pos[i]))
self.tableWidget_showSelectedPeaks.setItem(rowPosition, 3, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(pos_x[i]))
self.tableWidget_showSelectedPeaks.setItem(rowPosition, 4, item)
#Update the widget_showSelectedPeaks
self.update_peak_plot()
def savePeakDetModel(self):
#Get tableWidget_peakModelParameters and write it to excel file
#Get filename from user:
filename = QtWidgets.QFileDialog.getSaveFileName(self, 'Save peak fitting model', Default_dict["Path of last model"],"Excel file (*.xlsx)")
filename = filename[0]
if len(filename)==0:
return
#add the suffix .xlsx if it is missing
if not filename.endswith(".xlsx"):
filename = filename +".xlsx"
table = self.tableWidget_peakModelParameters
cols = table.columnCount()
header = range(cols)
rows = table.rowCount()
model_df = pd.DataFrame(columns=header,index=range(rows))
for i in range(rows):
for j in range(cols):
try:
model_df.iloc[i, j] = table.item(i, j).text()
except:
model_df.iloc[i, j] = np.nan
table = self.tableWidget_showSelectedPeaks
cols = table.columnCount()
header = [table.horizontalHeaderItem(col).text() for col in range(cols)]
rows = table.rowCount()
peaks_df = pd.DataFrame(columns=header,index=range(rows))
for i in range(rows):
for j in range(cols):
try:
peaks_df.iloc[i, j] = table.item(i, j).text()
except:
peaks_df.iloc[i, j] = np.nan
writer = pd.ExcelWriter(filename, engine='openpyxl')
#Model parameters and selected peaks go to separate sheets of the Excel file
pd.DataFrame().to_excel(writer,sheet_name='Model') #initialize empty Sheet
model_df.to_excel(writer,sheet_name='Model') #write the model parameters
pd.DataFrame().to_excel(writer,sheet_name='Peaks') #initialize empty Sheet
peaks_df.to_excel(writer,sheet_name='Peaks')
writer.save()
writer.close()
def loadPeakDetModel(self):
filename = QtWidgets.QFileDialog.getOpenFileName(self, 'Open peak fitting model', Default_dict["Path of last model"],"Excel file (*.xlsx)")
filename = filename[0]
if len(str(filename))==0:
return
peak_model_df = pd.read_excel(filename,sheet_name='Model')
model = peak_model_df.iloc[0,1]
if model=="Linear dependency and max in range":
#set the combobox accordingly
index = self.comboBox_peakDetModel.findText(model, QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_peakDetModel.setCurrentIndex(index)
else:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Could not find a valid model in the chosen file. Did you accidentially load a session or history file?!")
msg.setWindowTitle("No valid model found")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
range_ = float(peak_model_df.iloc[1,1])
intercept = float(peak_model_df.iloc[2,1])
slope = float(peak_model_df.iloc[3,1])
velocity = float(peak_model_df.iloc[4,1])
#put the information in the table
xlin = np.round(np.linspace(0,100,25),1)
ylin = intercept + slope*xlin
self.selectedPeaksPlot.plot(xlin, ylin,width=6,pen='b',clear=False)
#Put info to tableWidget_peakModelParameters
self.tableWidget_peakModelParameters.setColumnCount(2)
self.tableWidget_peakModelParameters.setRowCount(5)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, "Model")
self.tableWidget_peakModelParameters.setItem(0, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, "Linear dependency and max in range")
self.tableWidget_peakModelParameters.setItem(0, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, "Range [us]")
self.tableWidget_peakModelParameters.setItem(1, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(range_))
self.tableWidget_peakModelParameters.setItem(1, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, "Intercept [us]")
self.tableWidget_peakModelParameters.setItem(2, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(intercept))
self.tableWidget_peakModelParameters.setItem(2, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, "Slope [us/um]")
self.tableWidget_peakModelParameters.setItem(3, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(slope))
self.tableWidget_peakModelParameters.setItem(3, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, "Velocity[m/s]")
self.tableWidget_peakModelParameters.setItem(4, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(velocity))
self.tableWidget_peakModelParameters.setItem(4, 1, item)
def applyPeakModel_and_export(self):
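#Apply the fitted peak model to the selected file(s): predict the fluorescence peak
#position from pos_x via the linear model, search the trace maximum within the
#configured range around that position, and write the results into a copy of each
#.rtdc file, either overwriting fl*_max/fl*_pos or storing them in userdef0...userdef5.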
#On which files should the action be performed?
Files = []
if self.radioButton_exportAll.isChecked():
#Grab all items of comboBox_chooseRtdcFile
Files = [self.comboBox_chooseRtdcFile.itemText(i) for i in range(self.comboBox_chooseRtdcFile.count())]
else:
file = self.comboBox_chooseRtdcFile.currentText()
Files.append(str(file))
#Get the model from tableWidget_peakModelParameters
table = self.tableWidget_peakModelParameters
cols = table.columnCount()
header = range(cols)
rows = table.rowCount()
model_df = pd.DataFrame(columns=header,index=range(rows))
for i in range(rows):
for j in range(cols):
try:
model_df.iloc[i, j] = table.item(i, j).text()
except:
model_df.iloc[i, j] = np.nan
model = model_df.iloc[0,1]
if model == "Linear dependency and max in range":
range_us = float(model_df.iloc[1,1]) #[us]
intercept_us = float(model_df.iloc[2,1])
slope_us_um = float(model_df.iloc[3,1])
#velocity_m_s = float(model_df.iloc[4,1])
#Get a directory from the user!
folder = QtWidgets.QFileDialog.getExistingDirectory(self, 'Select directory', Default_dict["Path of last model"])
if len(folder)==0:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Invalid directory")
msg.setWindowTitle("Invalid directory")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
for rtdc_path in Files:
path, rtdc_file = os.path.split(rtdc_path)
savename = os.path.join(folder,rtdc_file)
#Avoid to save to an existing file:
addon = 1
while os.path.isfile(savename):
savename = savename.split(".rtdc")[0]
if addon>1:
savename = savename.split("_"+str(addon-1))[0]
savename = savename+"_"+str(addon)+".rtdc"
addon += 1
print("Saving to : "+savename)
failed,rtdc_ds = aid_bin.load_rtdc(rtdc_path)
if failed:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(rtdc_ds))
msg.setWindowTitle("Error occurred during loading file")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#Convert quantities to [index]
sample_rate = rtdc_ds.attrs["fluorescence:sample rate"]
range_ = (range_us*float(sample_rate))/1E6 #range was given in us->Divide by 1E6 to get to s and then multiply by the sample rate
# #check if an rtdc_ds is already chosen:
# if not hasattr(self,'rtdc_ds'):
# msg = QtWidgets.QMessageBox()
# msg.setIcon(QtWidgets.QMessageBox.Information)
# msg.setText("No measurement chosen yet. Use 'Update' button")
# msg.setWindowTitle("No measurement")
# msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
# msg.exec_()
# return
trace = rtdc_ds["events"]["trace"]
fl_keys = list(trace.keys()) #Which traces are available
fl1_max,fl1_pos,fl2_max,fl2_pos,fl3_max,fl3_pos,pos_x = [],[],[],[],[],[],[]
#Iterate over the available cells
pos_x = rtdc_ds["events"]["pos_x"] #is already given in [um]
indices = range(len(pos_x))
if model == "Linear dependency and max in range":
#Use the linear model to get the estimated location of the fluorescence peaks
fl_peak_position_us = intercept_us+slope_us_um*pos_x
#Convert to index
fl_peak_position_ = (fl_peak_position_us*float(sample_rate))/1E6
#Now we have the estimated peak position of each cell. Look at the traces on these spots
def ind_to_us(x):
return x*1E6/sample_rate
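#ind_to_us converts a trace index back to microseconds (index*1E6/sample_rate),
#e.g. index 250 at an assumed sample rate of 2.5 MHz gives 250*1E6/2.5E6 = 100 us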
#iterate over the cells:
for cellindex in range(len(pos_x)):
#Iterate over the availble traces
for i in range(len(fl_keys)):
if "_median" in fl_keys[i]:
trace_flx = trace[fl_keys[i]][cellindex]
trace_pos = np.array(range(len(trace_flx)))
left = int(fl_peak_position_[cellindex]-range_/2.0)
right = int(fl_peak_position_[cellindex]+range_/2.0)
trace_flx_range = trace_flx[left:right]
trace_pos_range = trace_pos[left:right]
ind = np.argmax(trace_flx_range)
if "fl1_median" in fl_keys[i]:
fl1_max.append(trace_flx_range[ind])
fl1_pos.append(ind_to_us(trace_pos_range[ind]))
if "fl2_median" in fl_keys[i]:
fl2_max.append(trace_flx_range[ind])
fl2_pos.append(ind_to_us(trace_pos_range[ind]))
if "fl3_median" in fl_keys[i]:
fl3_max.append(trace_flx_range[ind])
fl3_pos.append(ind_to_us(trace_pos_range[ind]))
#Save those new fluorescence features into free spots in .rtdc file
#Those names can be found via dclab.dfn.feature_names (userdef0...userdef9)
#TODO (dont use dclab anymore for saving)
#But just in case anyone uses that function?!
#get metadata of the dataset
meta = {}
# only export configuration meta data (no user-defined config)
for sec in dclab.definitions.CFG_METADATA:
if sec in ["fmt_tdms"]:
# ignored sections
continue
if sec in rtdc_ds.config:
meta[sec] = rtdc_ds.config[sec].copy()
#features = rtdc_ds._events.keys() #Get the names of the online features
compression = 'gzip'
nev = len(rtdc_ds)
#["Overwrite Fl_max and Fl_pos","Save to userdef"]
features = list(rtdc_ds["events"].keys())
if str(self.comboBox_toFlOrUserdef.currentText())=='Save to userdef':
features = features+["userdef"+str(i) for i in range(10)]
with dclab.rtdc_dataset.write_hdf5.write(path_or_h5file=savename,meta=meta, mode="append") as h5obj:
# write each feature individually
for feat in features:
# event-wise, because
# - tdms-based datasets don't allow indexing with numpy
# - there might be memory issues
if feat == "contour":
cont_list = [rtdc_ds["events"]["contour"][ii] for ii in indices]
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={"contour": cont_list},
mode="append",
compression=compression)
elif feat == "userdef0":
if "fl1_median" in fl_keys:
print("writing fl1_max to userdef0")
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={"userdef0": np.array(fl1_max)},
mode="append",
compression=compression)
elif feat == "userdef1":
if "fl2_median" in fl_keys:
print("writing fl2_max to userdef1")
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={"userdef1": np.array(fl2_max)},
mode="append",
compression=compression)
elif feat == "userdef2":
if "fl3_median" in fl_keys:
print("writing fl3_max to userdef2")
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={"userdef2": np.array(fl3_max)},
mode="append",
compression=compression)
elif feat == "userdef3":
if "fl1_pos" in features:
print("writing fl1_pos to userdef3")
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={"userdef3": np.array(fl1_pos)},
mode="append",
compression=compression)
elif feat == "userdef4":
if "fl2_pos" in features:
print("writing fl2_pos to userdef4")
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={"userdef4": np.array(fl2_pos)},
mode="append",
compression=compression)
elif feat == "userdef5":
if "fl3_pos" in features:
print("writing fl3_pos to userdef5")
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={"userdef5": np.array(fl3_pos)},
mode="append",
compression=compression)
elif feat in ["userdef"+str(i) for i in range(5,10)]:
pass
elif feat == "fl1_max":
if str(self.comboBox_toFlOrUserdef.currentText())=='Overwrite Fl_max and Fl_pos':
print("overwriting fl1_max")
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={feat: np.array(fl1_max)},mode="append",compression=compression)
elif feat == "fl2_max":
if str(self.comboBox_toFlOrUserdef.currentText())=='Overwrite Fl_max and Fl_pos':
print("overwriting fl2_max")
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={feat: np.array(fl2_max)},mode="append",compression=compression)
elif feat == "fl3_max":
if str(self.comboBox_toFlOrUserdef.currentText())=='Overwrite Fl_max and Fl_pos':
print("overwriting fl3_max")
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={feat: np.array(fl3_max)},mode="append",compression=compression)
elif feat == "fl1_pos":
if str(self.comboBox_toFlOrUserdef.currentText())=='Overwrite Fl_max and Fl_pos':
print("overwriting fl1_pos")
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={feat: np.array(fl1_pos)},mode="append",compression=compression)
elif feat == "fl2_pos":
if str(self.comboBox_toFlOrUserdef.currentText())=='Overwrite Fl_max and Fl_pos':
print("overwriting fl2_pos")
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={feat: np.array(fl2_pos)},mode="append",compression=compression)
elif feat == "fl3_pos":
if str(self.comboBox_toFlOrUserdef.currentText())=='Overwrite Fl_max and Fl_pos':
print("overwriting fl3_pos")
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={feat: np.array(fl3_pos)},mode="append",compression=compression)
elif feat == "index":
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={"index": np.array(indices)+1}, #ShapeOut likes to start with index=1
mode="append",
compression=compression)
elif feat in ["mask", "image"]:
# store image stacks (reduced file size and save time)
m = 64
im0 = rtdc_ds["events"][feat][0] #"mask" and "image" are read the same way
imstack = np.zeros((m, im0.shape[0], im0.shape[1]),
dtype=im0.dtype)
jj = 0
image_list = [rtdc_ds["events"][feat][ii] for ii in indices]
for ii in range(len(image_list)):
dat = image_list[ii]
#dat = rtdc_ds[feat][ii]
imstack[jj] = dat
if (jj + 1) % m == 0:
jj = 0
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={feat: imstack},
mode="append",
compression=compression)
else:
jj += 1
# write rest
if jj:
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={feat: imstack[:jj, :, :]},
mode="append",
compression=compression)
elif feat == "trace":
for tr in rtdc_ds["events"]["trace"].keys():
tr0 = rtdc_ds["events"]["trace"][tr][0]
trdat = np.zeros((nev, tr0.size), dtype=tr0.dtype)
jj = 0
trace_list = [rtdc_ds["events"]["trace"][tr][ii] for ii in indices]
for ii in range(len(trace_list)):
trdat[jj] = trace_list[ii]
jj += 1
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={"trace": {tr: trdat}},
mode="append",
compression=compression)
else:
dclab.rtdc_dataset.write_hdf5.write(h5obj,
data={feat: rtdc_ds["events"][feat][indices]},mode="append")
h5obj.close()
def partialtrainability_activated(self,on_or_off):
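#Toggle the expert option 'partial trainability': switching it on requires an
#initialized model and fills lineEdit_partialTrainability with the trainability state
#of every Dense/Conv2D layer; switching it off resets the model selection.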
if on_or_off==False:#0 means switched OFF
self.lineEdit_partialTrainability.setText("")
#self.lineEdit_partialTrainability.setEnabled(False)#enables the lineEdit which shows the trainability status of each layer.
self.pushButton_partialTrainability.setEnabled(False)
#Also, remove the model from self!
self.model_keras = None
self.radioButton_NewModel.setChecked(False)
self.radioButton_LoadRestartModel.setChecked(False)
self.radioButton_LoadContinueModel.setChecked(False)
self.lineEdit_LoadModelPath.setText("")#put the filename in the lineedit
#this happens when the user activated the expert option "partial trainability"
elif on_or_off==True or on_or_off==2:#True (or Qt checked state 2) means switched ON
#Has the user already chosen a model?
if self.model_keras is None: #if there is no model yet chosen
self.action_initialize_model(duties="initialize")
#If there is still no model...
if self.model_keras is None:# or self.model_keras_path==None: #if there is no model yet chosen
#Tell the user to initiate a model first!
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("<html><head/><body><p>To use this option please first select and load a model. To do that choose/load a model in 'Define Model'-Tab and hit the button 'Initialize/Fit Model'. Choose to only initialize the model.</p></body></html>")
msg.setWindowTitle("Please load a model first")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
#Switch off
self.lineEdit_partialTrainability.setText("")
self.radioButton_NewModel.setChecked(False)
self.radioButton_LoadRestartModel.setChecked(False)
self.radioButton_LoadContinueModel.setChecked(False)
self.lineEdit_LoadModelPath.setText("")
#self.lineEdit_partialTrainability.setEnabled(False)#enables the lineEdit which shows the trainability status of each layer.
self.pushButton_partialTrainability.setEnabled(False)
self.checkBox_partialTrainability.setChecked(False)
return
#Otherwise, there is a model on self and we can continue :)
#Collections are not supported
if type(self.model_keras)==tuple:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("<html><head/><body><p>Partial trainability is not available for collections of models. Please specify a single model.</p></body></html>")
msg.setWindowTitle("Collections of models not supported for collections of models")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#Switch on lineedit and the button
#self.lineEdit_partialTrainability.setEnabled(True)#enables the lineEdit which shows the trainability status of each layer.
self.pushButton_partialTrainability.setEnabled(True)#enables the button which opens the partial-trainability popup
#Load trainability states of the model
Layer_types = [self.model_keras.layers[i].__class__.__name__ for i in range(len(self.model_keras.layers))]
#Count Dense and Conv layers
is_dense_or_conv = [layer_type in ["Dense","Conv2D"] for layer_type in Layer_types]
index = np.where(np.array(is_dense_or_conv)==True)[0]
Layer_train_status = [self.model_keras.layers[layerindex].trainable for layerindex in index]
self.lineEdit_partialTrainability.setText(str(Layer_train_status))#show the trainability status of each Dense/Conv2D layer in the lineEdit
def partialTrainability(self):
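#Open a popup that lists all Dense/Conv2D layers of the current model together with a
#spinbox (0...1) per layer to define how much of that layer should remain trainable.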
self.popup_trainability = MyPopup()
self.popup_trainability_ui = aid_frontend.popup_trainability()
self.popup_trainability_ui.setupUi(self.popup_trainability) #open a popup to show the layers in a table
#One can only activate this function when there was a model loaded already!
#self.model_keras has to exist!!!
if self.model_keras is None: #if there is no model yet chosen
self.action_initialize_model(duties="initialize")
if self.model_keras is None: #if there is still no model...
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("<html><head/><body><p>To use this option please first select and load a model. To do that choose/load a model in 'Define Model'-Tab and hit the button 'Initialize/Fit Model'. Choose to only initialize the model.</p></body></html>")
msg.setWindowTitle("Please load a model first")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
#Switch this On in the final version
self.lineEdit_partialTrainability.setText("")
self.lineEdit_partialTrainability.setEnabled(False)#disable the lineEdit which shows the trainability status of each layer.
self.pushButton_partialTrainability.setEnabled(False)
return
#Fill information about the model
if self.radioButton_NewModel.isChecked():#a new model is loaded
self.popup_trainability_ui.lineEdit_pop_pTr_modelPath.setText("New model")
elif self.radioButton_LoadRestartModel.isChecked():#a new model is loaded
load_model_path = str(self.lineEdit_LoadModelPath.text())
self.popup_trainability_ui.lineEdit_pop_pTr_modelPath.setText("Restart model: "+load_model_path)
elif self.radioButton_LoadContinueModel.isChecked():#a new model is loaded
load_model_path = str(self.lineEdit_LoadModelPath.text())
self.popup_trainability_ui.lineEdit_pop_pTr_modelPath.setText("Continue model: "+load_model_path)
in_dim = self.model_keras.input_shape
#Retrieve the color_mode from the model (nr. of channels in last in_dim)
channels = in_dim[-1] #TensorFlow: channels in last dimension
out_dim = self.model_keras.output_shape[-1]
self.popup_trainability_ui.spinBox_pop_pTr_inpSize.setValue(int(in_dim[1]))
self.popup_trainability_ui.spinBox_pop_pTr_outpSize.setValue(int(out_dim))
if channels==1:
self.popup_trainability_ui.comboBox_pop_pTr_colorMode.addItem("Grayscale")
elif channels==3:
self.popup_trainability_ui.comboBox_pop_pTr_colorMode.addItem("RGB")
#Model summary to textBrowser_pop_pTr_modelSummary
summary = []
self.model_keras.summary(print_fn=summary.append)
summary = "\n".join(summary)
self.popup_trainability_ui.textBrowser_pop_pTr_modelSummary.setText(summary)
#Work on the tableWidget_pop_pTr_layersTable
Layer_types = [self.model_keras.layers[i].__class__.__name__ for i in range(len(self.model_keras.layers))]
#Count Dense and Conv layers
is_dense_or_conv = [layer_type in ["Dense","Conv2D"] for layer_type in Layer_types]
index = np.where(np.array(is_dense_or_conv)==True)[0]
nr_layers = len(index) #total nr. of dense and conv layers with parameters
for rowNumber in range(nr_layers):
layerindex = index[rowNumber]
columnPosition = 0
layer = self.model_keras.layers[layerindex]
rowPosition = self.popup_trainability_ui.tableWidget_pop_pTr_layersTable.rowCount()
self.popup_trainability_ui.tableWidget_pop_pTr_layersTable.insertRow(rowPosition)
Name = layer.name
item = QtWidgets.QTableWidgetItem(Name)
item.setFlags( QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable )
item.setTextAlignment(QtCore.Qt.AlignCenter) # change the alignment
self.popup_trainability_ui.tableWidget_pop_pTr_layersTable.setItem(rowPosition , columnPosition, item ) #
columnPosition = 1
layer_type = layer.__class__.__name__
item = QtWidgets.QTableWidgetItem(layer_type)
item.setFlags( QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable )
item.setTextAlignment(QtCore.Qt.AlignCenter) # change the alignment
self.popup_trainability_ui.tableWidget_pop_pTr_layersTable.setItem(rowPosition , columnPosition, item ) #
columnPosition = 2
Params = layer.count_params()
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.DisplayRole, Params)
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
self.popup_trainability_ui.tableWidget_pop_pTr_layersTable.setItem(rowPosition, columnPosition, item)
columnPosition = 3
if layer_type == "Dense":
split_property = "units" #'units' are the number of nodes in dense layers
elif layer_type == "Conv2D":
split_property = "filters"
else:
print("other splitprop!")
return
layer_config = layer.get_config()
nr_units = layer_config[split_property] #units are either nodes or filters for dense and convolutional layer, respectively
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.DisplayRole, int(nr_units))
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
self.popup_trainability_ui.tableWidget_pop_pTr_layersTable.setItem(rowPosition, columnPosition, item)
columnPosition = 4
#for each item create a spinbox (trainability)
spinb = QtWidgets.QDoubleSpinBox(self.popup_trainability_ui.tableWidget_pop_pTr_layersTable)
spinb.setMinimum(0)
spinb.setMaximum(1)
spinb.setSingleStep(0.1)
trainability = int(layer.trainable) #.trainable actually returns True or False. Make it integer
spinb.setValue(trainability) #usually 1, unless the layer was already frozen
self.popup_trainability_ui.tableWidget_pop_pTr_layersTable.setCellWidget(rowPosition, columnPosition, spinb)
self.popup_trainability.show()
#self.popup_trainability_ui.pushButton_pop_pTr_reset.clicked.connect(self.pop_pTr_reset)
self.popup_trainability_ui.pushButton_pop_pTr_update.clicked.connect(self.pop_pTr_update_2)
self.popup_trainability_ui.pushButton_pop_pTr_ok.clicked.connect(self.pop_pTr_ok)
###############Functions for the partial trainability popup################
def pop_pTr_reset(self):
#Reset the model to initial state, with partial trainability
print("Not implemented yet")
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("<html><head/><body><p>Not implemented yet.</p></body></html>")
msg.setWindowTitle("Not implemented")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
def pop_pTr_update_1(self):#main worker function
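#Read the trainability table of the popup, determine which layers were changed compared
#to the current model, apply the new (possibly fractional) trainabilities via
#partial_trainability(), and refresh the status shown in lineEdit_partialTrainability.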
#Apply the requested changes and display updated model in table
pTr_table = self.popup_trainability_ui.tableWidget_pop_pTr_layersTable
#Read the table:
Layer_names,Layer_trainabilities = [],[]
rowCount = pTr_table.rowCount()
for row in range(rowCount):
#Layer_indices.append(str(pTr_table.item(row, 0).text()))
Layer_names.append(str(pTr_table.item(row, 0).text()))
Layer_trainabilities.append(float(pTr_table.cellWidget(row, 4).value()))
Layer_trainabilities = np.array(Layer_trainabilities)
#What are the current trainability statuses of the model
Layer_trainabilities_orig = np.array([self.model_keras.get_layer(l_name).trainable for l_name in Layer_names])
diff = abs( Layer_trainabilities - Layer_trainabilities_orig )
ind = np.where( diff>0 )[0]
#Where do we have a trainability between 0 and 1
#ind = np.where( (Layer_trainabilities>0) & (Layer_trainabilities<1) )[0]
if len(ind)>0:
Layer_trainabilities = list(Layer_trainabilities[ind])
Layer_names = list(np.array(Layer_names)[ind])
#Update the model using user-specified trainabilities
self.model_keras = partial_trainability(self.model_keras,Layer_names,Layer_trainabilities)
#Update lineEdit_partialTrainability
Layer_types = [self.model_keras.layers[i].__class__.__name__ for i in range(len(self.model_keras.layers))]
#Count Dense and Conv layers
is_dense_or_conv = [layer_type in ["Dense","Conv2D"] for layer_type in Layer_types]
index = np.where(np.array(is_dense_or_conv)==True)[0]
Layer_train_status = [self.model_keras.layers[layerindex].trainable for layerindex in index]
self.lineEdit_partialTrainability.setText(str(Layer_train_status))#enables the lineEdit which shows the trainability status of each layer.
else:
print("Nothing to do. All trainabilities are either 0 or 1")
def pop_pTr_update_2(self):#call pop_pTr_update_1 to do the work and then update the window
try:
self.pop_pTr_update_1()#Change the model on self.model_keras according to the table
self.partialTrainability()#Update the popup window by calling the partialTrainability function
except Exception as e:
#There is an issue building the model!
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(e))
msg.setWindowTitle("Error occured when building model:")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
def pop_pTr_ok(self):
self.pop_pTr_update_1()#Change the model on self.model_keras according to the table; If 'Update' was used before, there will not be done work again, but the model is used as it is
#To make the model accessible, it has to be saved to a new .model file
filename = QtWidgets.QFileDialog.getSaveFileName(self, 'Save model', Default_dict["Path of last model"],"AIDeveloper model file (*.model)")
filename = filename[0]
path, fname = os.path.split(filename)
if len(fname)==0:
return
#add the suffix .model
if not fname.endswith(".model"):
fname = fname +".model"
filename = os.path.join(path,fname)
self.model_keras.save(filename)
#Activate 'load and restart' and put this file
#Avoid the automatic popup
self.radioButton_NewModel.setChecked(False)
self.radioButton_LoadRestartModel.setChecked(False)
self.radioButton_LoadContinueModel.setChecked(True)
self.lineEdit_LoadModelPath.setText(filename)#put the filename in the lineedit
#Destroy the window
self.popup_trainability = None
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText(tooltips["modelsaved_success"])
msg.setWindowTitle("Sucessfully created and selected model")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
def lossW_comboB(self,state_nr,listindex):
if listindex==-1:
ui_item = self.popup_lossW_ui
else:
ui_item = self.fittingpopups_ui[listindex].popup_lossW_ui
state_str = ui_item.comboBox_lossW.itemText(int(state_nr))
rows_nr = int(ui_item.tableWidget_lossW.rowCount())
if rows_nr==0:
state_str = "None"
if state_str=="None":
for rowPos in range(rows_nr):
colPos = 4 #"Loss weights"
ui_item.tableWidget_lossW.cellWidget(rowPos,colPos).setEnabled(False)
ui_item.tableWidget_lossW.cellWidget(rowPos,colPos).setValue(1.0)
elif state_str=="Custom":
for rowPos in range(rows_nr):
colPos = 4 #"Loss weights"
ui_item.tableWidget_lossW.cellWidget(rowPos,colPos).setEnabled(True)
elif state_str=="Balanced":
#How many cells in total per epoch
events_epoch = [int(ui_item.tableWidget_lossW.item(rowPos,2).text()) for rowPos in range(rows_nr)]
classes = [int(ui_item.tableWidget_lossW.item(rowPos,0).text()) for rowPos in range(rows_nr)]
counter = {}
for i in range(len(classes)):
counter[classes[i]]=events_epoch[i]
max_val = float(max(counter.values()))
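#Balanced weighting: each class gets weight max_count/count_c, so the most frequent
#class has weight 1.0 and rarer classes are up-weighted proportionally.
#Example: counts {0: 800, 1: 200} -> weights {0: 1.0, 1: 4.0}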
class_weights = {class_id : max_val/num_images for class_id, num_images in counter.items()}
class_weights = list(class_weights.values())
for rowPos in range(rows_nr):
colPos = 4 #"Loss weights"
ui_item.tableWidget_lossW.cellWidget(rowPos,colPos).setEnabled(False)
ui_item.tableWidget_lossW.cellWidget(rowPos,colPos).setValue(class_weights[rowPos])
def lossW_ok(self,listindex):
#This happens when the user presses the OK button on the popup for
#custom loss weights
if listindex==-1:
ui_item = self
else:
ui_item = self.fittingpopups_ui[listindex]
#Which option was used on comboBox_lossW?
state_str = ui_item.popup_lossW_ui.comboBox_lossW.currentText()
if state_str=="None":#User left None. This actually means its off
ui_item.lineEdit_lossW.setText("")
ui_item.pushButton_lossW.setEnabled(False)
ui_item.checkBox_lossW.setChecked(False)
elif state_str=="Custom":#User left None. This actually means its off
#There are custom values
#Read the loss values on the table
rows_nr = int(ui_item.popup_lossW_ui.tableWidget_lossW.rowCount())
classes = [int(ui_item.popup_lossW_ui.tableWidget_lossW.item(rowPos,0).text()) for rowPos in range(rows_nr)]
loss_weights = [float(ui_item.popup_lossW_ui.tableWidget_lossW.cellWidget(rowPos,4).value()) for rowPos in range(rows_nr)]
counter = {}
for i in range(len(classes)):
counter[classes[i]]=loss_weights[i]
#Put counter (its a dictionary) to lineedit
ui_item.lineEdit_lossW.setText(str(counter))
elif state_str=="Balanced":#Balanced, the values are computed later fresh, even when user changes the cell-numbers again
ui_item.lineEdit_lossW.setText("Balanced")
#Destroy the window
ui_item.popup_lossW = None
def lossW_cancel(self,listindex):
#This happens when the user presses the Cancel button on the popup for
#custom loss weights
if listindex==-1:
ui_item = self
else:
ui_item = self.fittingpopups_ui[listindex]
if ui_item.lineEdit_lossW.text()=="":
#if state_str=="None":#User left None. This actually means its off
ui_item.lineEdit_lossW.setText("")
ui_item.pushButton_lossW.setEnabled(False)
ui_item.checkBox_lossW.setChecked(False)
ui_item.popup_lossW = None
return
#Destroy the window
ui_item.popup_lossW = None
def get_norm_from_manualselection(self):
norm = self.comboBox_w.currentText()
index = self.comboBox_Normalization.findText(norm, QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_Normalization.setCurrentIndex(index)
self.w.close()
def popup_normalization(self):
self.w = MyPopup()
self.gridLayout_w = QtWidgets.QGridLayout(self.w)
self.gridLayout_w.setObjectName(_fromUtf8("gridLayout"))
self.verticalLayout_w = QtWidgets.QVBoxLayout()
self.verticalLayout_w.setObjectName(_fromUtf8("verticalLayout"))
self.label_w = QtWidgets.QLabel(self.w)
self.label_w.setAlignment(QtCore.Qt.AlignCenter)
self.label_w.setObjectName(_fromUtf8("label_w"))
self.verticalLayout_w.addWidget(self.label_w)
self.horizontalLayout_2_w = QtWidgets.QHBoxLayout()
self.horizontalLayout_2_w.setObjectName(_fromUtf8("horizontalLayout_2"))
self.pushButton_w = QtWidgets.QPushButton(self.w)
self.pushButton_w.setObjectName(_fromUtf8("pushButton"))
self.horizontalLayout_2_w.addWidget(self.pushButton_w)
self.horizontalLayout_w = QtWidgets.QHBoxLayout()
self.horizontalLayout_w.setObjectName(_fromUtf8("horizontalLayout"))
self.label_2_w = QtWidgets.QLabel(self.w)
self.label_2_w.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_2_w.setObjectName(_fromUtf8("label_2_w"))
self.horizontalLayout_w.addWidget(self.label_2_w)
self.comboBox_w = QtWidgets.QComboBox(self.w)
self.comboBox_w.setObjectName(_fromUtf8("comboBox"))
self.comboBox_w.addItems(["Select"]+self.norm_methods)
self.comboBox_w.setMinimumSize(QtCore.QSize(200,22))
self.comboBox_w.setMaximumSize(QtCore.QSize(200, 22))
width=self.comboBox_w.fontMetrics().boundingRect(max(self.norm_methods, key=len)).width()
self.comboBox_w.view().setFixedWidth(width+10)
self.comboBox_w.currentIndexChanged.connect(self.get_norm_from_manualselection)
self.horizontalLayout_w.addWidget(self.comboBox_w)
self.horizontalLayout_2_w.addLayout(self.horizontalLayout_w)
self.verticalLayout_w.addLayout(self.horizontalLayout_2_w)
self.gridLayout_w.addLayout(self.verticalLayout_w, 0, 0, 1, 1)
self.w.setWindowTitle("Select normalization method")
self.label_w.setText("You are about to continue training a pretrained model\n"
"Please select the meta file of that model to load the normalization method\n"
"or choose the normalization method manually")
self.pushButton_w.setText("Load meta file")
self.label_2_w.setText("Manual \n"
"selection")
#one button that allows to load a meta file containing the norm-method
self.pushButton_w.clicked.connect(self.get_norm_from_modelparafile)
self.w.show()
def action_preview_model(self,enabled):#function runs when radioButton_LoadRestartModel or radioButton_LoadContinueModel was clicked
if enabled:
#if the "Load and restart" radiobutton was clicked:
if self.radioButton_LoadRestartModel.isChecked():
modelname = QtWidgets.QFileDialog.getOpenFileName(self, 'Open model architecture', Default_dict["Path of last model"],"Architecture or model (*.arch *.model)")
modelname = modelname[0]
#modelname_for_dict = modelname
#if the "Load and continue" radiobutton was clicked:
elif self.radioButton_LoadContinueModel.isChecked():
modelname = QtWidgets.QFileDialog.getOpenFileName(self, 'Open model with all parameters', Default_dict["Path of last model"],"Keras model (*.model)")
modelname = modelname[0]
#modelname_for_dict = modelname
self.lineEdit_LoadModelPath.setText(modelname) #Put the filename to the line edit
#Remember the location for next time
if len(str(modelname))>0:
Default_dict["Path of last model"] = os.path.split(modelname)[0]
aid_bin.save_aid_settings(Default_dict)
#If user wants to load and restart a model
if self.radioButton_LoadRestartModel.isChecked():
#load the model and print summary
if modelname.endswith(".arch"):
json_file = open(modelname, 'r')
model_config = json_file.read()
json_file.close()
model_config = json.loads(model_config)
#cut the .arch suffix off
modelname = modelname.split(".arch")[0]
#Or a .model (FULL model with trained weights) , but for display only load the architecture
elif modelname.endswith(".model"):
#Load the model config (this is the architecture)
model_full_h5 = h5py.File(modelname, 'r')
model_config = model_full_h5.attrs['model_config']
model_full_h5.close() #close the hdf5
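#The 'model_config' attribute is presumably stored as bytes; str() yields "b'...'",
#so [2:-1] strips the b'' wrapper before the JSON is parsed.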
model_config = json.loads(str(model_config)[2:-1])
#model = model_from_config(model_config)
modelname = modelname.split(".model")[0]
else:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("No valid file was chosen. Please specify a file that was created using AIDeveloper or Keras")
msg.setWindowTitle("No valid file was chosen")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#raise ValueError("No valid file was chosen")
text1 = "Architecture: loaded from .arch\nWeights: will be randomly initialized'\n"
#Try to find the corresponding .meta
#All models have a number:
metaname = modelname.rsplit('_',1)[0]+"_meta.xlsx"
if os.path.isfile(metaname):
#open the metafile
meta = pd.read_excel(metaname,sheet_name="Parameters")
if "Chosen Model" in list(meta.keys()):
chosen_model = meta["Chosen Model"].iloc[-1]
else:
#Try to get the model architecture and adjust the combobox
try:
ismlp,chosen_model = model_zoo.mlpconfig_to_str(model_config)
except:#No model could be identified
chosen_model = "None"
else:
#Try to get the model architecture and adjust the combobox
try:
ismlp,chosen_model = model_zoo.mlpconfig_to_str(model_config)
except:#No model could be identified
chosen_model = "None"
if chosen_model is not None:
#chosen_model is a string that should be contained in comboBox_ModelSelection
index = self.comboBox_ModelSelection.findText(chosen_model, QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_ModelSelection.setCurrentIndex(index)
else:
index = self.comboBox_ModelSelection.findText('None', QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_ModelSelection.setCurrentIndex(index)
#Otherwise, user wants to load and continue training a model
elif self.radioButton_LoadContinueModel.isChecked():
#User can only choose a .model (FULL model with trained weights) , but for display only load the architecture
if modelname.endswith(".model"):
#Load the model config (this is the architecture)
model_full_h5 = h5py.File(modelname, 'r')
model_config = model_full_h5.attrs['model_config']
model_full_h5.close() #close the hdf5
model_config = json.loads(str(model_config)[2:-1])
#model = model_from_config(model_config)
modelname = modelname.split(".model")[0]
#Try to find the corresponding .meta
#All models have a number:
metaname = modelname.rsplit('_',1)[0]+"_meta.xlsx"
if os.path.isfile(metaname):
#open the metafile
meta = pd.read_excel(metaname,sheet_name="Parameters")
if "Chosen Model" in list(meta.keys()):
chosen_model = meta["Chosen Model"].iloc[-1]
else:
#Try to get the model architecture and adjust the combobox
try:
ismlp,chosen_model = model_zoo.mlpconfig_to_str(model_config)
except:#No model could be identified
chosen_model = "None"
else:
#Try to get the model architecture and adjust the combobox
try:
ismlp,chosen_model = model_zoo.mlpconfig_to_str(model_config)
except:#No model could be identified
chosen_model = "None"
if chosen_model is not None:
#chosen_model is a string that should be contained in comboBox_ModelSelection
index = self.comboBox_ModelSelection.findText(chosen_model, QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_ModelSelection.setCurrentIndex(index)
else:
index = self.comboBox_ModelSelection.findText('None', QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_ModelSelection.setCurrentIndex(index)
text1 = "Architecture: loaded from .model\nWeights: pretrained weights will be loaded and used when hitting button 'Initialize model!'\n"
else:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("No valid file was chosen. Please specify a file that was created using AIDeveloper or Keras")
msg.setWindowTitle("No valid file was chosen")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#raise ValueError("No valid file was chosen")
#In both cases (restart or continue) the input dimensions have to fit
#The number of output classes should also fit but this is not essential
#but most users certainly want the same number of classes (output)->Give Info
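#Sequential models store the layer list directly under 'config', while the functional
#API nests it under 'config'['layers']. The 'units' of the second-to-last layer entry
#(typically the final Dense layer) are used as the number of output classes.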
try: #Sequential Model
in_dim = model_config['config'][0]['config']['batch_input_shape']
except: #Functional Api
in_dim = model_config['config']["layers"][0]["config"]["batch_input_shape"]
try: #Sequential Model
out_dim = model_config['config'][-2]['config']['units']
except: #Functional Api
out_dim = model_config['config']["layers"][-2]["config"]["units"]
#
# in_dim = model_config['config'][0]['config']['batch_input_shape']
# out_dim = model_config['config'][-2]['config']['units']
#Retrieve the color_mode from the model (nr. of channels in last in_dim)
channels = in_dim[-1] #TensorFlow: channels in last dimension
if channels==1:
channel_text = "1 channel (Grayscale)"
if self.get_color_mode()!="Grayscale":
#when model needs Grayscale, set the color mode in comboBox_GrayOrRGB to that
index = self.comboBox_GrayOrRGB.findText("Grayscale", QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_GrayOrRGB.setCurrentIndex(index)
self.statusbar.showMessage("Color Mode set to Grayscale",5000)
elif channels==3:
channel_text = "3 channels (RGB)"
if self.get_color_mode()!="RGB":
#when model needs RGB, set the color mode in the ui to that
index = self.comboBox_GrayOrRGB.findText("RGB", QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_GrayOrRGB.setCurrentIndex(index)
self.statusbar.showMessage("Color Mode set to RGB",5000)
text2 = "Model Input: loaded Model takes: "+str(in_dim[-3])+" x "+str(in_dim[-2]) + " pixel images and "+channel_text+"\n"
if int(self.spinBox_imagecrop.value())!=int(in_dim[-2]):
self.spinBox_imagecrop.setValue(in_dim[-2])
text2 = text2+ "'Input image size' in GUI was changed accordingly\n"
#check that the nr. of classes are equal to the model out put
SelectedFiles = self.items_clicked_no_rtdc_ds()
indices = [s["class"] for s in SelectedFiles]
nr_classes = np.max(indices)+1
if int(nr_classes)==int(out_dim):
text3 = "Output: "+str(out_dim)+" classes\n"
elif int(nr_classes)>int(out_dim):#Dataset has more classes than the model provides!
text3 = "Loaded model has only "+(str(out_dim))+\
" output nodes (classes) but your selected data has "+str(nr_classes)+\
" classes. Therefore, the model will be adjusted before fitting, by customizing the final Dense layer.\n"
#aid_dl.model_add_classes(model_keras,nr_classes)#this function changes model_keras inplace
elif int(nr_classes)<int(out_dim):#Dataset has less classes than the model provides!
text3 = "Model output: The architecture you chose has "+(str(out_dim))+\
" output nodes (classes) and your selected data has only "+str(nr_classes)+\
" classes. This is fine. The model will essentially have some excess classes that are not used.\n"
text = text1+text2+text3
self.textBrowser_Info.setText(text)
if self.radioButton_LoadContinueModel.isChecked():
#"Load the parameter file of the model that should be continued and apply the same normalization"
#Make a popup: You are about to continue to train a pretrained model
#Please select the parameter file of that model to load the normalization method
#or choose the normalization method manually:
#this is important
self.popup_normalization()
def get_metrics(self,nr_classes):
Metrics = []
f1 = bool(self.checkBox_expertF1.isChecked())
if f1==True:
Metrics.append("f1_score")
precision = bool(self.checkBox_expertPrecision.isChecked())
if precision==True:
Metrics.append("precision")
recall = bool(self.checkBox_expertRecall.isChecked())
if recall==True:
Metrics.append("recall")
metrics = ['accuracy'] + Metrics
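#get_metrics_tensors presumably converts the metric names into Keras-compatible metric
#functions for the given number of classes (needed e.g. for per-class f1/precision/recall)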
metrics = aid_dl.get_metrics_tensors(metrics,nr_classes)
return metrics
def action_set_modelpath_and_name(self):
#Get the path and filename for the new model
filename = QtWidgets.QFileDialog.getSaveFileName(self, 'Save model', Default_dict["Path of last model"],"Keras Model file (*.model)")
filename = filename[0]
if len(filename)==0:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("No valid filename was chosen.")
msg.setWindowTitle("No valid filename was chosen")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
if filename.endswith(".arch"):
filename = filename.split(".arch")[0]
#add the suffix .model
if not filename.endswith(".model"):
filename = filename +".model"
self.lineEdit_modelname.setText(filename)
#Write to Default_dict
Default_dict["Path of last model"] = os.path.split(filename)[0]
aid_bin.save_aid_settings(Default_dict)
def get_dataOverview(self):
table = self.tableWidget_Info
cols = table.columnCount()
header = [table.horizontalHeaderItem(col).text() for col in range(cols)]
rows = table.rowCount()
tmp_df = pd.DataFrame(columns=header,index=range(rows))
for i in range(rows):
for j in range(cols):
try:
tmp_df.iloc[i, j] = table.item(i, j).text()
except:
tmp_df.iloc[i, j] = np.nan
return tmp_df
def action_initialize_model(self,duties="initialize_train"):
"""
duties: which tasks should be performed: "initialize", "initialize_train", "initialize_lrfind"
"""
#print("duties: "+str(duties))
#Create config (define which device to use)
if self.radioButton_cpu.isChecked():
deviceSelected = str(self.comboBox_cpu.currentText())
elif self.radioButton_gpu.isChecked():
deviceSelected = str(self.comboBox_gpu.currentText())
gpu_memory = float(self.doubleSpinBox_memory.value())
config_gpu = aid_dl.get_config(cpu_nr,gpu_nr,deviceSelected,gpu_memory)
# try:
# K.clear_session()
# except:
# print("Could not clear_session (7)")
with tf.Session(graph = tf.Graph(), config=config_gpu) as sess:
#Initialize the model
#######################Load and restart model##########################
if self.radioButton_LoadRestartModel.isChecked():
load_modelname = str(self.lineEdit_LoadModelPath.text())
text0 = "Loaded model: "+load_modelname
#load the model and print summary
if load_modelname.endswith(".arch"):
json_file = open(load_modelname, 'r')
model_config = json_file.read()
json_file.close()
model_keras = model_from_json(model_config)
model_config = json.loads(model_config)
text1 = "\nArchitecture: loaded from .arch\nWeights: randomly initialized\n"
#Or a .model (FULL model with trained weights) , but for display only load the architecture
elif load_modelname.endswith(".model"):
#Load the model config (this is the architecture)
model_full_h5 = h5py.File(load_modelname, 'r')
model_config = model_full_h5.attrs['model_config']
model_full_h5.close() #close the hdf5
model_config = json.loads(str(model_config)[2:-1])
model_keras = model_from_config(model_config)
text1 = "\nArchitecture: loaded from .model\nWeights: randomly initialized\n"
else:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("No valid file was chosen. Please specify a file that was created using AIDeveloper or Keras")
msg.setWindowTitle("No valid file was chosen")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
chosen_model = str(self.comboBox_ModelSelection.currentText()) #default, in case the meta file is missing or lacks "Chosen Model"
try:
metaname = load_modelname.rsplit('_',1)[0]+"_meta.xlsx"
if os.path.isfile(metaname):
#open the metafile
meta = pd.read_excel(metaname,sheet_name="Parameters")
if "Chosen Model" in list(meta.keys()):
chosen_model = meta["Chosen Model"].iloc[-1]
except:
chosen_model = str(self.comboBox_ModelSelection.currentText())
#In both cases (restart or continue) the input dimensions have to fit
#The number of output classes should also fit but this is not essential
#but most users certainly want the same number of classes (output)->Give Info
try: #Sequential Model
in_dim = model_config['config'][0]['config']['batch_input_shape']
except: #Functional Api
in_dim = model_config['config']["layers"][0]["config"]["batch_input_shape"]
try: #Sequential Model
out_dim = model_config['config'][-2]['config']['units']
except: #Functional Api
out_dim = model_config['config']["layers"][-2]["config"]["units"]
# in_dim = model_config['config'][0]['config']['batch_input_shape']
# out_dim = model_config['config'][-2]['config']['units']
channels = in_dim[-1] #TensorFlow: channels in last dimension
#Compile model (consider user-specific metrics)
model_metrics = self.get_metrics(out_dim)
model_keras.compile(loss='categorical_crossentropy',optimizer='adam',metrics=model_metrics)#dont specify loss and optimizer yet...expert stuff will follow and model will be recompiled
if channels==1:
channel_text = "1 channel (Grayscale)"
if self.get_color_mode()!="Grayscale":
#when model needs Grayscale, set the color mode in comboBox_GrayOrRGB to that
index = self.comboBox_GrayOrRGB.findText("Grayscale", QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_GrayOrRGB.setCurrentIndex(index)
self.statusbar.showMessage("Color Mode set to Grayscale",5000)
elif channels==3:
channel_text = "3 channels (RGB)"
if self.get_color_mode()!="RGB":
#when model needs RGB, set the color mode in the ui to that
index = self.comboBox_GrayOrRGB.findText("RGB", QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_GrayOrRGB.setCurrentIndex(index)
self.statusbar.showMessage("Color Mode set to RGB",5000)
text2 = "Model Input: "+str(in_dim[-3])+" x "+str(in_dim[-2]) + " pixel images and "+channel_text+"\n"
if int(self.spinBox_imagecrop.value())!=int(in_dim[-2]):
self.spinBox_imagecrop.setValue(in_dim[-2])
text2 = text2+ "'Input image size' in GUI was changed accordingly\n"
#check that the nr. of classes are equal to the model out put
SelectedFiles = self.items_clicked()
indices = [s["class"] for s in SelectedFiles]
nr_classes = np.max(indices)+1
if int(nr_classes)==int(out_dim):
text3 = "Output: "+str(out_dim)+" classes\n"
elif int(nr_classes)>int(out_dim):#Dataset has more classes than the model provides!
text3 = "Loaded model has only "+(str(out_dim))+\
" output nodes (classes) but your selected data has "+str(nr_classes)+\
" classes. Therefore, the model will be adjusted before fitting, by customizing the final Dense layer.\n"
aid_dl.model_add_classes(model_keras,nr_classes)#this function changes model_keras inplace
elif int(nr_classes)<int(out_dim):#Dataset has less classes than the model provides!
text3 = "Model output: The architecture you chose has "+(str(out_dim))+\
" output nodes (classes) and your selected data has only "+str(nr_classes)+\
" classes. This is fine. The model will essentially have some excess classes that are not used.\n"
###############Load and continue training the model####################
elif self.radioButton_LoadContinueModel.isChecked():
load_modelname = str(self.lineEdit_LoadModelPath.text())
text0 = "Loaded model: "+load_modelname+"\n"
#User can only choose a .model (FULL model with trained weights) , but for display only load the architecture
if load_modelname.endswith(".model"):
#Load the full model
try:
model_keras = load_model(load_modelname,custom_objects=aid_dl.get_custom_metrics())
except:
K.clear_session() #On linux It happened that there was an error, if another fitting run before
model_keras = load_model(load_modelname,custom_objects=aid_dl.get_custom_metrics())
#model_config = model_keras.config() #Load the model config (this is the architecture)
#load_modelname = load_modelname.split(".model")[0]
text1 = "Architecture: loaded from .model\nWeights: pretrained weights were loaded\n"
else:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("No valid file was chosen. Please specify a file that was created using AIDeveloper or Keras")
msg.setWindowTitle("No valid file was chosen")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#raise ValueError("No valid file was chosen")
try:
metaname = load_modelname.rsplit('_',1)[0]+"_meta.xlsx"
if os.path.isfile(metaname):
#open the metafile
meta = pd.read_excel(metaname,sheet_name="Parameters")
if "Chosen Model" in list(meta.keys()):
chosen_model = meta["Chosen Model"].iloc[-1]
else:
chosen_model = str(self.comboBox_ModelSelection.currentText())
except:
chosen_model = str(self.comboBox_ModelSelection.currentText())
#Check input dimensions
#The number of output classes should also fit but this is not essential
#but most users certainly want the same number of classes (output)->Give Info
# in_dim = model_config['config'][0]['config']['batch_input_shape']
# out_dim = model_config['config'][-2]['config']['units']
in_dim = model_keras.get_input_shape_at(0)
out_dim = model_keras.get_output_shape_at(0)[1]
channels = in_dim[-1] #TensorFlow: channels in last dimension
if channels==1:
channel_text = "1 channel (Grayscale)"
if self.get_color_mode()!="Grayscale":
#when model needs Grayscale, set the color mode in comboBox_GrayOrRGB to that
index = self.comboBox_GrayOrRGB.findText("Grayscale", QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_GrayOrRGB.setCurrentIndex(index)
self.statusbar.showMessage("Color Mode set to Grayscale",5000)
elif channels==3:
channel_text = "3 channels (RGB)"
if self.get_color_mode()!="RGB":
#when model needs RGB, set the color mode in the ui to that
index = self.comboBox_GrayOrRGB.findText("RGB", QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_GrayOrRGB.setCurrentIndex(index)
self.statusbar.showMessage("Color Mode set to RGB",5000)
text2 = "Model Input: "+str(in_dim[-3])+" x "+str(in_dim[-2]) + " pixel images and "+channel_text+"\n"
if int(self.spinBox_imagecrop.value())!=int(in_dim[-2]):
self.spinBox_imagecrop.setValue(in_dim[-2])
text2 = text2+ "'Input image size' in GUI was changed accordingly\n"
#check that the nr. of classes are equal to the model out put
SelectedFiles = self.items_clicked()
indices = [s["class"] for s in SelectedFiles]
nr_classes = np.max(indices)+1
if int(nr_classes)==int(out_dim):
text3 = "Output: "+str(out_dim)+" classes\n"
elif int(nr_classes)>int(out_dim):#Dataset has more classes than the model provides!
text3 = "Loaded model has only "+(str(out_dim))+\
" output nodes (classes) but your selected data has "+str(nr_classes)+\
" classes. Therefore, the model will be adjusted before fitting, by customizing the final Dense layer.\n"
aid_dl.model_add_classes(model_keras,nr_classes)#this function changes model_keras inplace
elif int(nr_classes)<int(out_dim):#Dataset has less classes than the model provides!
text3 = "Model output: The architecture you chose has "+(str(out_dim))+\
" output nodes (classes) and your selected data has only "+str(nr_classes)+\
" classes. This is fine. The model will essentially have some excess classes that are not used.\n"
###########################New model###################################
elif self.radioButton_NewModel.isChecked():
load_modelname = "" #No model is loaded
text0 = load_modelname
#Create a new model!
#Get what the user wants from the dropdown menu!
chosen_model = str(self.comboBox_ModelSelection.currentText())
if chosen_model==None:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("No model specified!")
msg.setWindowTitle("No model specified!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
in_dim = int(self.spinBox_imagecrop.value())
SelectedFiles = self.items_clicked()
#rtdc_ds = SelectedFiles[0]["rtdc_ds"]
if str(self.comboBox_GrayOrRGB.currentText())=="Grayscale":
channels=1
elif str(self.comboBox_GrayOrRGB.currentText())=="RGB":
channels=3
indices = [s["class"] for s in SelectedFiles]
indices_unique = np.unique(np.array(indices))
if len(indices_unique)<2:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Need at least two classes to fit. Please specify .rtdc files and corresponding indeces")
msg.setWindowTitle("No valid file was chosen")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
out_dim = np.max(indices)+1
nr_classes = out_dim
if chosen_model=="None":
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("No model specified!")
msg.setWindowTitle("No model specified!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
try:
model_keras = model_zoo.get_model(chosen_model,in_dim,channels,out_dim)
except Exception as e:
#There is an issue building the model!
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(e))
msg.setWindowTitle("Error occured when building model:")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
text1 = "Architecture: created "+chosen_model+" design\nWeights: Initialized random weights\n"
if self.get_color_mode()=="Grayscale":
channels = 1
channel_text = "1 channel (Grayscale)"
elif self.get_color_mode()=="RGB":
channels = 3
channel_text = "3 channels (RGB)"
text2 = "Model Input: "+str(in_dim)+" x "+str(in_dim) + " pixel images and "+channel_text+"\n"
if int(nr_classes)==int(out_dim):
text3 = "Output: "+str(out_dim)+" classes\n"
elif int(nr_classes)>int(out_dim):#Dataset has more classes than the model provides!
text3 = "Loaded model has only "+(str(out_dim))+\
" output nodes (classes) but your selected data has "+str(nr_classes)+\
" classes. Therefore, the model will be adjusted before fitting, by customizing the final Dense layer.\n"
aid_dl.model_add_classes(model_keras,nr_classes)#this function changes model_keras inplace
elif int(nr_classes)<int(out_dim):#Dataset has less classes than the model provides!
text3 = "Model output: The architecture you chose has "+(str(out_dim))+\
" output nodes (classes) and your selected data has only "+str(nr_classes)+\
" classes. This is fine. The model will essentially have some excess classes that are not used.\n"
else:
#No radio-button was chosen
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Please use the radiobuttons to define the model")
msg.setWindowTitle("No model defined")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#If expert mode is on, apply the requested options
#This affects learning rate, trainability of layers and dropout rate
expert_mode = bool(self.groupBox_expertMode.isChecked())
learning_rate_const = float(self.doubleSpinBox_learningRate.value())
learning_rate_expert_on = bool(self.groupBox_learningRate.isChecked())
train_last_layers = bool(self.checkBox_trainLastNOnly.isChecked())
train_last_layers_n = int(self.spinBox_trainLastNOnly.value())
train_dense_layers = bool(self.checkBox_trainDenseOnly.isChecked())
dropout_expert_on = bool(self.checkBox_dropout.isChecked())
loss_expert_on = bool(self.checkBox_expt_loss.isChecked())
loss_expert = str(self.comboBox_expt_loss.currentText()).lower()
optimizer_expert_on = bool(self.checkBox_optimizer.isChecked())
optimizer_expert = str(self.comboBox_optimizer.currentText()).lower()
optimizer_settings = self.optimizer_settings.copy() #get the current optimizer settings
paddingMode = str(self.comboBox_paddingMode.currentText())#.lower()
model_metrics = self.get_metrics(nr_classes)
if "collection" in chosen_model.lower():
for m in model_keras[1]: #in a collection, model_keras[0] are the names of the models and model_keras[1] is a list of all models
aid_dl.model_compile(m,loss_expert,optimizer_settings,learning_rate_const,self.get_metrics(nr_classes),nr_classes)
if not "collection" in chosen_model.lower():
aid_dl.model_compile(model_keras,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
try:
dropout_expert = str(self.lineEdit_dropout.text()) #due to the validator, there are no squ.brackets
dropout_expert = "["+dropout_expert+"]"
dropout_expert = ast.literal_eval(dropout_expert)
except:
dropout_expert = []
if type(model_keras)==tuple:#when user chose a Collection of models, a tuple is returned by get_model
collection = True
else:
collection = False
if collection==False: #if there is a single model:
#Original learning rate (before expert mode is switched on!)
try:
self.learning_rate_original = K.eval(model_keras.optimizer.lr)
except:
print("Session busy. Try again in fresh session...")
#tf.reset_default_graph() #Make sure to start with a fresh session
K.clear_session()
sess = tf.Session(graph = tf.Graph(), config=config_gpu)
#K.set_session(sess)
self.learning_rate_original = K.eval(model_keras.optimizer.lr)
#Get initial trainability states of model
self.trainable_original, self.layer_names = aid_dl.model_get_trainable_list(model_keras)
trainable_original, layer_names = self.trainable_original, self.layer_names
self.do_list_original = aid_dl.get_dropout(model_keras)#Get a list of dropout values of the current model
do_list_original = self.do_list_original
if collection==True: #if there is a collection of models:
#Original learning rate (before expert mode is switched on!)
self.learning_rate_original = [K.eval(model_keras[1][i].optimizer.lr) for i in range(len(model_keras[1]))]
#Get initial trainability states of model
trainable_layerName = [aid_dl.model_get_trainable_list(model_keras[1][i]) for i in range(len(model_keras[1]))]
self.trainable_original = [trainable_layerName[i][0] for i in range(len(trainable_layerName))]
self.layer_names = [trainable_layerName[i][1] for i in range(len(trainable_layerName))]
trainable_original, layer_names = self.trainable_original, self.layer_names
self.do_list_original = [aid_dl.get_dropout(model_keras[1][i]) for i in range(len(model_keras[1]))]#Get a list of dropout values of the current model
do_list_original = self.do_list_original
#TODO add expert mode ability for collection of models. Maybe define self.model_keras as a list in general. So, fitting a single model is just a special case
if expert_mode==True:
#Apply the changes to trainable states:
if train_last_layers==True:#Train only the last n layers
print("Train only the last "+str(train_last_layers_n)+ " layer(s)")
trainable_new = (len(trainable_original)-train_last_layers_n)*[False]+train_last_layers_n*[True]
aid_dl.model_change_trainability(model_keras,trainable_new,model_metrics,out_dim,loss_expert,optimizer_settings,learning_rate_const)
if train_dense_layers==True:#Train only dense layers
print("Train only dense layers")
layer_dense_ind = ["Dense" in x for x in layer_names]
layer_dense_ind = np.where(np.array(layer_dense_ind)==True)[0] #at which indices are dropout layers?
#create a list of trainable states
trainable_new = len(trainable_original)*[False]
for index in layer_dense_ind:
trainable_new[index] = True
aid_dl.model_change_trainability(model_keras,trainable_new,model_metrics,out_dim,loss_expert,optimizer_settings,learning_rate_const)
if dropout_expert_on==True:
#The user apparently want to change the dropout rates
do_list = aid_dl.get_dropout(model_keras)#Get a list of dropout values of the current model
#Compare the dropout values in the model to the dropout values requested by user
if len(dropout_expert)==1:#if the user gave a float
dropout_expert_list=len(do_list)*dropout_expert #convert to list
elif len(dropout_expert)>1:
dropout_expert_list = dropout_expert
if not len(dropout_expert_list)==len(do_list):
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Issue with dropout: you defined "+str(len(dropout_expert_list))+" dropout rates, but model has "+str(len(do_list))+" dropout layers")
msg.setWindowTitle("Issue with Expert->Dropout")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
dropout_expert_list = []
return
else:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Could not understand user input at Expert->Dropout")
msg.setWindowTitle("Issue with Expert->Dropout")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
dropout_expert_list = []
if len(dropout_expert_list)>0 and do_list!=dropout_expert_list:#if the dropout rates of the current model is not equal to the required do_list from user...
do_changed = aid_dl.change_dropout(model_keras,dropout_expert_list,model_metrics,nr_classes,loss_expert,optimizer_settings,learning_rate_const)
if do_changed==1:
text_do = "Dropout rate(s) in model was/were changed to: "+str(dropout_expert_list)
else:
text_do = "Dropout rate(s) in model was/were not changed"
else:
text_do = "Dropout rate(s) in model was/were not changed"
print(text_do)
text_updates = ""
#Learning Rate: Compare current lr and the lr on expert tab:
if collection == False:
lr_current = K.eval(model_keras.optimizer.lr)
else:
lr_current = K.eval(model_keras[1][0].optimizer.lr)
lr_diff = learning_rate_const-lr_current
if abs(lr_diff) > 1e-6: #If there is a difference, change lr accordingly
K.set_value(model_keras.optimizer.lr, learning_rate_const)
text_updates += "Learning rate: "+str(lr_current)+"\n"
recompile = False
#Compare current optimizer and the optimizer on expert tab:
if collection==False:
optimizer_current = aid_dl.get_optimizer_name(model_keras).lower()#get the current optimizer of the model
else:
optimizer_current = aid_dl.get_optimizer_name(model_keras[1][0]).lower()#get the current optimizer of the model
if optimizer_current!=optimizer_expert.lower():#if the current model has a different optimizer
recompile = True
text_updates+="Optimizer: "+optimizer_expert+"\n"
#Loss function: Compare current loss function and the loss-function on expert tab:
if collection==False:
if model_keras.loss!=loss_expert:
recompile = True
if collection==True:
if model_keras[1][0].loss!=loss_expert:
recompile = True
text_updates += "Loss function: "+loss_expert+"\n"
if recompile==True:
if collection==False:
print("Recompiling...")
aid_dl.model_compile(model_keras,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
if collection==True:
for m in model_keras[1]:
print("Recompiling...")
aid_dl.model_compile(m,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
self.model_keras = model_keras #overwrite the model in self
if collection == False:
#Get user-specified filename for the new model
new_modelname = str(self.lineEdit_modelname.text())
if len(new_modelname)>0:
text_new_modelname = "Model will be saved as: "+new_modelname+"\n"
else:
text_new_modelname = "Please specify a model path (name for the model to be fitted)\n"
if collection == True:
new_modelname = str(self.lineEdit_modelname.text())
if len(new_modelname)>0:
new_modelname = os.path.split(new_modelname)
text_new_modelname = "Collection of Models will be saved into: "+new_modelname[0]+"\n"
else:
text_new_modelname = "Please specify a model path (name for the model to be fitted)\n"
#Info about normalization method
norm = str(self.comboBox_Normalization.currentText())
text4 = "Input image normalization method: "+norm+"\n"
#Check if there are dropout layers:
#do_list = aid_dl.get_dropout(model_keras)#Get a list of dropout values of the current model
if len(do_list_original)>0:
text4 = text4+"Found "+str(len(do_list_original)) +" dropout layers with rates: "+str(do_list_original)+"\n"
else:
text4 = text4+"Found no dropout layers\n"
if expert_mode==True:
if dropout_expert_on:
text4 = text4+text_do+"\n"
# if learning_rate_expert_on==True:
# if K.eval(model_keras.optimizer.lr) != learning_rate_const: #if the learning rate in UI is NOT equal to the lr of the model...
# text_lr = "Changed the learning rate to: "+ str(learning_rate_const)+"\n"
# text4 = text4+text_lr
text5 = "Model summary:\n"
summary = []
if collection==False:
model_keras.summary(print_fn=summary.append)
summary = "\n".join(summary)
text = text_new_modelname+text0+text1+text2+text3+text4+text_updates+text5+summary
self.textBrowser_Info.setText(text)
#Save the model architecture: serialize to JSON
model_json = model_keras.to_json()
with open(new_modelname.split(".model")[0]+".arch", "w") as json_file:
json_file.write(model_json)
elif collection==True:
if self.groupBox_expertMode.isChecked()==True:
self.groupBox_expertMode.setChecked(False)
print("Turned off expert mode. Not implemented yet for collections of models. This does not affect user-specified metrics (precision/recall/f1)")
self.model_keras_arch_path = [new_modelname[0]+os.sep+new_modelname[1].split(".model")[0]+"_"+model_keras[0][i]+".arch" for i in range(len(model_keras[0]))]
for i in range(len(model_keras[1])):
model_keras[1][i].summary(print_fn=summary.append)
#Save the model architecture: serialize to JSON
model_json = model_keras[1][i].to_json()
with open(self.model_keras_arch_path[i], "w") as json_file:
json_file.write(model_json)
summary = "\n".join(summary)
text = text_new_modelname+text0+text1+text2+text3+text4+text_updates+text5+summary
self.textBrowser_Info.setText(text)
#Save the model to a variable on self
self.model_keras = model_keras
#Get the user-defined cropping size
crop = int(self.spinBox_imagecrop.value())
#Make the cropsize a bit larger since the images will later be rotated
cropsize2 = np.sqrt(crop**2+crop**2)
cropsize2 = np.ceil(cropsize2 / 2.) * 2 #round to the next even number
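#e.g. crop=32 -> sqrt(32^2+32^2)=45.25 -> cropsize2=46; this guarantees that a rotated
#image can still be cropped to crop x crop pixels without missing corners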
#Estimate RAM needed
nr_imgs = np.sum([np.array(list(SelectedFiles)[i]["nr_images"]) for i in range(len(list(SelectedFiles)))])
ram_needed = np.round(nr_imgs * aid_bin.calc_ram_need(cropsize2),2)
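#i.e. number of selected images times the per-image footprint at the enlarged crop size
#(aid_bin.calc_ram_need presumably returns the memory need per image in MB)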
if duties=="initialize":#Stop here if the model just needs to be intialized (for expert mode->partial trainability)
return
elif duties=="initialize_train":
#Tell the user if the data is stored and read from ram or not
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Question)
text = "<html><head/><body><p>Should the model only be initialized,\
or do you want to start fitting right after? For fitting, data will\
be loaded to RAM (since Edit->Data to RAM is enabled), which will\
require "+str(ram_needed)+"MB of RAM.</p></body></html>"
msg.setText(text)
msg.setWindowTitle("Initialize model or initialize and fit model?")
msg.addButton(QtWidgets.QPushButton('Stop after model initialization'), QtWidgets.QMessageBox.RejectRole)
msg.addButton(QtWidgets.QPushButton('Start fitting'), QtWidgets.QMessageBox.ApplyRole)
retval = msg.exec_()
elif duties=="initialize_lrfind":
retval = 1
else:
print("Invalid duties: "+duties)
return
if retval==0: #'Stop after model initialization' was chosen: only initialize the model
print("Closing session")
del model_keras
sess.close()
return
elif retval == 1:
if self.actionDataToRam.isChecked():
color_mode = self.get_color_mode()
zoom_factors = [selectedfile["zoom_factor"] for selectedfile in SelectedFiles]
#zoom_order = [self.actionOrder0.isChecked(),self.actionOrder1.isChecked(),self.actionOrder2.isChecked(),self.actionOrder3.isChecked(),self.actionOrder4.isChecked(),self.actionOrder5.isChecked()]
#zoom_order = int(np.where(np.array(zoom_order)==True)[0])
zoom_order = int(self.comboBox_zoomOrder.currentIndex()) #the combobox-index is already the zoom order
#Check if there is data already available in RAM
if len(self.ram)==0:#no data stored on RAM yet
print("No data on RAM. I have to load")
dic = aid_img.crop_imgs_to_ram(list(SelectedFiles),cropsize2,zoom_factors=zoom_factors,zoom_order=zoom_order,color_mode=color_mode)
self.ram = dic
else:
print("There is already some data on RAM")
new_fileinfo = {"SelectedFiles":list(SelectedFiles),"cropsize2":cropsize2,"zoom_factors":zoom_factors,"zoom_order":zoom_order,"color_mode":color_mode}
identical = aid_bin.ram_compare_data(self.ram,new_fileinfo)
if not identical:
#Load the data
dic = aid_img.crop_imgs_to_ram(list(SelectedFiles),cropsize2,zoom_factors=zoom_factors,zoom_order=zoom_order,color_mode=color_mode)
self.ram = dic
if identical:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Question)
text = "Data was loaded before! Should same data be reused? If not, click 'Reload data', e.g. if you altered the Data-table."
text = "<html><head/><body><p>"+text+"</p></body></html>"
msg.setText(text)
msg.setWindowTitle("Found data on RAM")
msg.addButton(QtWidgets.QPushButton('Reuse data'), QtWidgets.QMessageBox.YesRole)
msg.addButton(QtWidgets.QPushButton('Reload data'), QtWidgets.QMessageBox.NoRole)
retval = msg.exec_()
if retval==0:
print("Re-use data")
#Re-use same data
elif retval==1:
print("Re-load data")
dic = aid_img.crop_imgs_to_ram(list(SelectedFiles),cropsize2,zoom_factors=zoom_factors,zoom_order=zoom_order,color_mode=color_mode)
self.ram = dic
#Finally, activate the 'Fit model' button again
#self.pushButton_FitModel.setEnabled(True)
if duties=="initialize_train":
self.action_fit_model()
if duties=="initialize_lrfind":
self.action_lr_finder()
del model_keras
def action_fit_model_worker(self,progress_callback,history_callback):
if self.radioButton_cpu.isChecked():
gpu_used = False
deviceSelected = str(self.comboBox_cpu.currentText())
elif self.radioButton_gpu.isChecked():
gpu_used = True
deviceSelected = str(self.comboBox_gpu.currentText())
gpu_memory = float(self.doubleSpinBox_memory.value())
#Retrieve more Multi-GPU Options from Menubar:
cpu_merge = bool(self.actioncpu_merge.isEnabled())
cpu_relocation = bool(self.actioncpu_relocation.isEnabled())
cpu_weight_merge = bool(self.actioncpu_weightmerge.isEnabled())
#Create config (define which device to use)
config_gpu = aid_dl.get_config(cpu_nr,gpu_nr,deviceSelected,gpu_memory)
with tf.Session(graph = tf.Graph(), config=config_gpu) as sess:
#get an index of the fitting popup
listindex = self.popupcounter-1
#Get user-specified filename for the new model
new_modelname = str(self.lineEdit_modelname.text())
model_keras_path = self.model_keras_path
if type(model_keras_path)==list:
collection = True
#Take the initialized models
model_keras_path = self.model_keras_path
model_keras = [load_model(model_keras_path[i],custom_objects=aid_dl.get_custom_metrics()) for i in range(len(model_keras_path)) ]
model_architecture_names = self.model_keras[0]
print(model_architecture_names)
#self.model_keras = None
else:
collection = False
if deviceSelected=="Multi-GPU" and cpu_weight_merge==True:
with tf.device("/cpu:0"):
model_keras = load_model(model_keras_path,custom_objects=aid_dl.get_custom_metrics())
else:
model_keras = load_model(model_keras_path,custom_objects=aid_dl.get_custom_metrics())
#self.model_keras = None
#Initialize a variable for the parallel model
model_keras_p = None
#Multi-GPU
if deviceSelected=="Multi-GPU":
if collection==False:
print("Adjusting the model for Multi-GPU")
model_keras_p = multi_gpu_model(model_keras, gpus=gpu_nr, cpu_merge=cpu_merge, cpu_relocation=cpu_relocation)#indicate the numbers of gpus that you have
if self.radioButton_LoadContinueModel.isChecked():#calling multi_gpu_model resets the weights. Hence, they need to be put in place again
model_keras_p.layers[-2].set_weights(model_keras.get_weights())
elif collection==True:
print("Collection & Multi-GPU is not supported yet")
return
# model_keras_p = []
# for m in model_keras_p:
# print("Adjusting the model for Multi-GPU")
# model_keras_p.append(multi_gpu_model(m, gpus=gpu_nr)) #indicate the numbers of gpus that you have
##############Main function after hitting FIT MODEL####################
if self.radioButton_LoadRestartModel.isChecked():
load_modelname = str(self.lineEdit_LoadModelPath.text())
elif self.radioButton_LoadContinueModel.isChecked():
load_modelname = str(self.lineEdit_LoadModelPath.text())
elif self.radioButton_NewModel.isChecked():
load_modelname = "" #No model is loaded
if collection==False:
#model_config = model_keras.get_config()#["layers"]
nr_classes = int(model_keras.output.shape.dims[1])
if collection==True:
#model_config = model_keras.get_config()#["layers"]
nr_classes = int(model_keras[0].output.shape.dims[1])
#Metrics to be displayed during fitting (real-time)
model_metrics = self.get_metrics(nr_classes)
#Compile model
if collection==False and deviceSelected=="Single-GPU":
model_keras.compile(loss='categorical_crossentropy',optimizer='adam',metrics=model_metrics)#dont specify loss and optimizer yet...expert stuff will follow and model will be recompiled
elif collection==False and deviceSelected=="Multi-GPU":
model_keras_p.compile(loss='categorical_crossentropy',optimizer='adam',metrics=model_metrics)#dont specify loss and optimizer yet...expert stuff will follow and model will be recompiled
elif collection==True and deviceSelected=="Single-GPU":
#Switch off the expert tab!
self.fittingpopups_ui[listindex].groupBox_expertMode_pop.setChecked(False)
self.fittingpopups_ui[listindex].groupBox_expertMode_pop.setEnabled(False)
for m in model_keras:
m.compile(loss='categorical_crossentropy',optimizer='adam',metrics=self.get_metrics(nr_classes))#dont specify loss and optimizer yet...expert stuff will follow and model will be recompiled
elif collection==True and deviceSelected=="Multi-GPU":
print("Collection & Multi-GPU is not supported yet")
return
#Original learning rate:
#learning_rate_original = self.learning_rate_original#K.eval(model_keras.optimizer.lr)
#Original trainable states of layers with parameters
trainable_original, layer_names = self.trainable_original, self.layer_names
do_list_original = self.do_list_original
#Collect all information about the fitting routine that was user
#defined
if self.actionVerbose.isChecked()==True:
verbose = 1
else:
verbose = 0
new_model = self.radioButton_NewModel.isChecked()
chosen_model = str(self.comboBox_ModelSelection.currentText())
crop = int(self.spinBox_imagecrop.value())
color_mode = str(self.comboBox_GrayOrRGB.currentText())
loadrestart_model = self.radioButton_LoadRestartModel.isChecked()
loadcontinue_model = self.radioButton_LoadContinueModel.isChecked()
norm = str(self.comboBox_Normalization.currentText())
nr_epochs = int(self.spinBox_NrEpochs.value())
keras_refresh_nr_epochs = int(self.spinBox_RefreshAfterEpochs.value())
h_flip = bool(self.checkBox_HorizFlip.isChecked())
v_flip = bool(self.checkBox_VertFlip.isChecked())
rotation = float(self.lineEdit_Rotation.text())
width_shift = float(self.lineEdit_widthShift.text())
height_shift = float(self.lineEdit_heightShift.text())
zoom = float(self.lineEdit_zoomRange.text())
shear = float(self.lineEdit_shearRange.text())
brightness_refresh_nr_epochs = int(self.spinBox_RefreshAfterNrEpochs.value())
brightness_add_lower = float(self.spinBox_PlusLower.value())
brightness_add_upper = float(self.spinBox_PlusUpper.value())
brightness_mult_lower = float(self.doubleSpinBox_MultLower.value())
brightness_mult_upper = float(self.doubleSpinBox_MultUpper.value())
gaussnoise_mean = float(self.doubleSpinBox_GaussianNoiseMean.value())
gaussnoise_scale = float(self.doubleSpinBox_GaussianNoiseScale.value())
contrast_on = bool(self.checkBox_contrast.isChecked())
contrast_lower = float(self.doubleSpinBox_contrastLower.value())
contrast_higher = float(self.doubleSpinBox_contrastHigher.value())
saturation_on = bool(self.checkBox_saturation.isChecked())
saturation_lower = float(self.doubleSpinBox_saturationLower.value())
saturation_higher = float(self.doubleSpinBox_saturationHigher.value())
hue_on = bool(self.checkBox_hue.isChecked())
hue_delta = float(self.doubleSpinBox_hueDelta.value())
avgBlur_on = bool(self.checkBox_avgBlur.isChecked())
avgBlur_min = int(self.spinBox_avgBlurMin.value())
avgBlur_max = int(self.spinBox_avgBlurMax.value())
gaussBlur_on = bool(self.checkBox_gaussBlur.isChecked())
gaussBlur_min = int(self.spinBox_gaussBlurMin.value())
gaussBlur_max = int(self.spinBox_gaussBlurMax.value())
motionBlur_on = bool(self.checkBox_motionBlur.isChecked())
motionBlur_kernel = str(self.lineEdit_motionBlurKernel.text())
motionBlur_angle = str(self.lineEdit_motionBlurAngle.text())
motionBlur_kernel = tuple(ast.literal_eval(motionBlur_kernel)) #translate string in the lineEdits to a tuple
motionBlur_angle = tuple(ast.literal_eval(motionBlur_angle)) #translate string in the lineEdits to a tuple
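#e.g. "(3,9)" -> (3, 9); presumably the (min, max) range for the motion blur kernel size and angle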
if collection==False:
expert_mode = bool(self.groupBox_expertMode.isChecked())
elif collection==True:
self.groupBox_expertMode.setChecked(False) #setChecked returns None; expert_mode is set explicitly below
print("Expert mode was switched off. Not implemented yet for collections")
expert_mode = False
batchSize_expert = int(self.spinBox_batchSize.value())
epochs_expert = int(self.spinBox_epochs.value())
learning_rate_expert_on = bool(self.groupBox_learningRate.isChecked())
learning_rate_const_on = bool(self.radioButton_LrConst.isChecked())
learning_rate_const = float(self.doubleSpinBox_learningRate.value())
learning_rate_cycLR_on = bool(self.radioButton_LrCycl.isChecked())
try:
cycLrMin = float(self.lineEdit_cycLrMin.text())
cycLrMax = float(self.lineEdit_cycLrMax.text())
except:
cycLrMin = []
cycLrMax = []
cycLrMethod = str(self.comboBox_cycLrMethod.currentText())
#clr_settings = self.fittingpopups_ui[listindex].clr_settings.copy()
cycLrGamma = self.clr_settings["gamma"]
SelectedFiles = self.items_clicked()#to compute cycLrStepSize, the number of training images is needed
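#The cyclical learning rate step size is the number of iterations per half-cycle; it is
#presumably derived from the number of training images, the user-defined step-size factor and the batch size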
cycLrStepSize = aid_dl.get_cyclStepSize(SelectedFiles,self.clr_settings["step_size"],batchSize_expert)
#put clr_settings onto fittingpopup,
self.fittingpopups_ui[listindex].clr_settings = self.clr_settings.copy()#assign a copy. Otherwise values in both dicts are changed when manipulating one dict
#put optimizer_settings onto fittingpopup,
self.fittingpopups_ui[listindex].optimizer_settings = self.optimizer_settings.copy()#assign a copy. Otherwise values in both dicts are changed when manipulating one dict
learning_rate_expo_on = bool(self.radioButton_LrExpo.isChecked())
expDecInitLr = float(self.doubleSpinBox_expDecInitLr.value())
expDecSteps = int(self.spinBox_expDecSteps.value())
expDecRate = float(self.doubleSpinBox_expDecRate.value())
loss_expert_on = bool(self.checkBox_expt_loss.isChecked())
loss_expert = str(self.comboBox_expt_loss.currentText()).lower()
optimizer_expert_on = bool(self.checkBox_optimizer.isChecked())
optimizer_expert = str(self.comboBox_optimizer.currentText()).lower()
optimizer_settings = self.fittingpopups_ui[listindex].optimizer_settings.copy()#make a copy to make sure that changes in the UI are not immediately used
paddingMode = str(self.comboBox_paddingMode.currentText())#.lower()
train_last_layers = bool(self.checkBox_trainLastNOnly.isChecked())
train_last_layers_n = int(self.spinBox_trainLastNOnly.value())
train_dense_layers = bool(self.checkBox_trainDenseOnly.isChecked())
dropout_expert_on = bool(self.checkBox_dropout.isChecked())
try:
dropout_expert = str(self.lineEdit_dropout.text()) #due to the validator, there are no squ.brackets
dropout_expert = "["+dropout_expert+"]"
dropout_expert = ast.literal_eval(dropout_expert)
except:
dropout_expert = []
lossW_expert_on = bool(self.checkBox_lossW.isChecked())
lossW_expert = str(self.lineEdit_lossW.text())
#To get the class weights (loss), the SelectedFiles are required
#SelectedFiles = self.items_clicked()
#Check if xtra_data should be used for training
xtra_in = [s["xtra_in"] for s in SelectedFiles]
if len(set(xtra_in))==1:
xtra_in = list(set(xtra_in))[0]
elif len(set(xtra_in))>1:#both False and True are present. Not supported
print("Xtra data is used only for some files. Xtra data needs to be used either by all or by none!")
return
self.fittingpopups_ui[listindex].SelectedFiles = SelectedFiles #save to self. to make it accessible for popup showing loss weights
#Get the class weights. This function runs now the first time in the fitting routine.
#It is possible that the user chose Custom weights and then changed the classes. Hence first check if
#there is a weight for each class available.
class_weight = self.get_class_weight(self.fittingpopups_ui[listindex].SelectedFiles,lossW_expert,custom_check_classes=True)
if type(class_weight)==list:
#There has been a mismatch between the classes described in class_weight and the classes available in SelectedFiles!
lossW_expert = class_weight[0] #overwrite
class_weight = class_weight[1]
print("class_weight:" +str(class_weight))
print("There has been a mismatch between the classes described in \
Loss weights and the classes available in the selected files! \
Hence, the Loss weights are set to Balanced")
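#Descriptive note: exactly one learning-rate mode is expected to be active at a time (constant,
#cyclical or exponential decay), selected via the presumably mutually exclusive radio buttons;
#the values collected above are handed to aid_dl.get_lr_callback/get_lr_dict below.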
#Get callback for the learning rate scheduling
callback_lr = aid_dl.get_lr_callback(learning_rate_const_on,learning_rate_const,
learning_rate_cycLR_on,cycLrMin,cycLrMax,
cycLrMethod,cycLrStepSize,
learning_rate_expo_on,
expDecInitLr,expDecSteps,expDecRate,cycLrGamma)
#save a dictionary with initial values
lr_dict_original = aid_dl.get_lr_dict(learning_rate_const_on,learning_rate_const,
learning_rate_cycLR_on,cycLrMin,cycLrMax,
cycLrMethod,cycLrStepSize,
learning_rate_expo_on,
expDecInitLr,expDecSteps,expDecRate,cycLrGamma)
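#Descriptive note: the meta file (<modelname>_meta.xlsx) holds four sheets: 'UsedData' (the selected
#files), 'DataOverview' (summary table), 'Parameters' (fitting settings, appended whenever they change)
#and 'History' (initialized empty here; presumably filled with per-epoch metrics during fitting).
#For collections, one such meta file is created per model.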
if collection==False:
#Create an excel file
writer = pd.ExcelWriter(new_modelname.split(".model")[0]+'_meta.xlsx', engine='openpyxl')
self.fittingpopups_ui[listindex].writer = writer
#Used files go to a separate sheet on the MetaFile.xlsx
SelectedFiles_df = pd.DataFrame(SelectedFiles)
pd.DataFrame().to_excel(writer,sheet_name='UsedData') #initialize empty Sheet
SelectedFiles_df.to_excel(writer,sheet_name='UsedData')
DataOverview_df = self.get_dataOverview()
DataOverview_df.to_excel(writer,sheet_name='DataOverview') #write data overview to separate sheet
pd.DataFrame().to_excel(writer,sheet_name='Parameters') #initialize empty Sheet
pd.DataFrame().to_excel(writer,sheet_name='History') #initialize empty Sheet
elif collection==True:
SelectedFiles_df = pd.DataFrame(SelectedFiles)
Writers = []
#Create excel files
for i in range(len(model_keras_path)):
writer = pd.ExcelWriter(model_keras_path[i].split(".model")[0]+'_meta.xlsx', engine='openpyxl')
Writers.append(writer)
for writer in Writers:
#Used files go to a separate sheet on the MetaFile.xlsx
pd.DataFrame().to_excel(writer,sheet_name='UsedData') #initialize empty Sheet
SelectedFiles_df.to_excel(writer,sheet_name='UsedData')
DataOverview_df = self.get_dataOverview()
DataOverview_df.to_excel(writer,sheet_name='DataOverview') #write data overview to separate sheet
pd.DataFrame().to_excel(writer,sheet_name='Parameters') #initialize empty Sheet
pd.DataFrame().to_excel(writer,sheet_name='History') #initialize empty Sheet
###############################Expert Mode values##################
expert_mode_before = False #There was no expert mode used before.
if expert_mode==True:
#activate groupBox_expertMode_pop
self.fittingpopups_ui[listindex].groupBox_expertMode_pop.setChecked(True)
expert_mode_before = True
#Some settings only need to be changed once, after user clicked apply at next epoch
#Apply the changes to trainable states:
if train_last_layers==True:#Train only the last n layers
print("Train only the last "+str(train_last_layers_n)+ " layer(s)")
trainable_new = (len(trainable_original)-train_last_layers_n)*[False]+train_last_layers_n*[True]
summary = aid_dl.model_change_trainability(model_keras,trainable_new,model_metrics,nr_classes,loss_expert,optimizer_settings,learning_rate_const)
if model_keras_p!=None:#if this is NOT None, there exists a parallel model, which also needs to be re-compiled
aid_dl.model_compile(model_keras_p,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
print("Recompiled parallel model for train_last_layers==True")
text1 = "Expert mode: Request for custom trainability states: train only the last "+str(train_last_layers_n)+ " layer(s)\n"
#text2 = "\n--------------------\n"
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text1+summary)
if train_dense_layers==True:#Train only dense layers
print("Train only dense layers")
layer_dense_ind = ["Dense" in x for x in layer_names]
layer_dense_ind = np.where(np.array(layer_dense_ind)==True)[0] #at which indices are Dense layers?
#create a list of trainable states
trainable_new = len(trainable_original)*[False]
for index in layer_dense_ind:
trainable_new[index] = True
summary = aid_dl.model_change_trainability(model_keras,trainable_new,model_metrics,nr_classes,loss_expert,optimizer_settings,learning_rate_const)
if model_keras_p!=None:#if this is NOT None, there exists a parallel model, which also needs to be re-compiled
aid_dl.model_compile(model_keras_p,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
print("Recompiled parallel model for train_dense_layers==True")
text1 = "Expert mode: Request for custom trainability states: train only dense layer(s)\n"
#text2 = "\n--------------------\n"
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text1+summary)
if dropout_expert_on==True:
#The user apparently wants to change the dropout rates
do_list = aid_dl.get_dropout(model_keras)#Get a list of dropout values of the current model
#Compare the dropout values in the model to the dropout values requested by user
if len(dropout_expert)==1:#if the user gave a single float
dropout_expert_list = len(do_list)*dropout_expert #convert to list
elif len(dropout_expert)>1:
dropout_expert_list = dropout_expert
if not len(dropout_expert_list)==len(do_list):
text = "Issue with dropout: you defined "+str(len(dropout_expert_list))+" dropout rates, but model has "+str(len(do_list))+" dropout layers"
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
else:
text = "Could not understand user input at Expert->Dropout"
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
dropout_expert_list = []
if len(dropout_expert_list)>0 and do_list!=dropout_expert_list:#if the dropout rates of the current model are not equal to the do_list requested by the user...
do_changed = aid_dl.change_dropout(model_keras,dropout_expert_list,model_metrics,nr_classes,loss_expert,optimizer_settings,learning_rate_const)
if model_keras_p!=None:#if this is NOT None, there exists a parallel model, which also needs to be re-compiled
aid_dl.model_compile(model_keras_p,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
print("Recompiled parallel model to change dropout. I'm not sure if this works already!")
if do_changed==1:
text_do = "Dropout rate(s) in model was/were changed to: "+str(dropout_expert_list)
else:
text_do = "Dropout rate(s) in model was/were not changed"
else:
text_do = "Dropout rate(s) in model was/were not changed"
print(text_do)
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text_do)
text_updates = ""
#Compare current lr and the lr on expert tab:
if collection == False:
lr_current = K.eval(model_keras.optimizer.lr)
else:
lr_current = K.eval(model_keras[0].optimizer.lr)
lr_diff = learning_rate_const-lr_current
if abs(lr_diff) > 1e-6:
if collection == False:
K.set_value(model_keras.optimizer.lr, learning_rate_const)
if collection == True:
for m in model_keras:
K.set_value(m.optimizer.lr, learning_rate_const)
text_updates += "Changed the learning rate to "+ str(learning_rate_const)+"\n"
#Check if model has to be compiled again
recompile = False #by default, don't recompile (this is the case when training is continued via "Load and continue")
if new_model==True:
recompile = True
#Compare current optimizer and the optimizer on expert tab:
if collection==False:
optimizer_current = aid_dl.get_optimizer_name(model_keras).lower()#get the current optimizer of the model
if collection==True:
optimizer_current = aid_dl.get_optimizer_name(model_keras[0]).lower()#get the current optimizer of the model
if optimizer_current!=optimizer_expert.lower():#if the current model has a different optimizer
recompile = True
text_updates+="Changed the optimizer to "+optimizer_expert+"\n"
#Compare current loss function and the loss-function on expert tab:
if collection==False:
if model_keras.loss!=loss_expert:
recompile = True
text_updates+="Changed the loss function to "+loss_expert+"\n"
if collection==True:
if model_keras[0].loss!=loss_expert:
recompile = True
text_updates+="Changed the loss function to "+loss_expert+"\n"
if recompile==True:
print("Recompiling...")
if collection==False:
aid_dl.model_compile(model_keras,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
if collection==True:
for m in model_keras:
aid_dl.model_compile(m, loss_expert, optimizer_settings, learning_rate_const,model_metrics, nr_classes)
if model_keras_p!=None:#if this is NOT None, there exists a parallel model, which also needs to be re-compiled
aid_dl.model_compile(model_keras_p,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
print("Recompiled parallel model to adjust learning rate, loss, optimizer")
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text_updates)
#self.model_keras = model_keras #overwrite the model on self
######################Load the Training Data################################
ind = [selectedfile["TrainOrValid"] == "Train" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
SelectedFiles_train = np.array(SelectedFiles)[ind]
SelectedFiles_train = list(SelectedFiles_train)
indices_train = [selectedfile["class"] for selectedfile in SelectedFiles_train]
nr_events_epoch_train = [selectedfile["nr_events_epoch"] for selectedfile in SelectedFiles_train]
rtdc_path_train = [selectedfile["rtdc_path"] for selectedfile in SelectedFiles_train]
zoom_factors_train = [selectedfile["zoom_factor"] for selectedfile in SelectedFiles_train]
#zoom_order = [self.actionOrder0.isChecked(),self.actionOrder1.isChecked(),self.actionOrder2.isChecked(),self.actionOrder3.isChecked(),self.actionOrder4.isChecked(),self.actionOrder5.isChecked()]
#zoom_order = int(np.where(np.array(zoom_order)==True)[0])
zoom_order = int(self.comboBox_zoomOrder.currentIndex()) #the combobox-index is already the zoom order
shuffle_train = [selectedfile["shuffle"] for selectedfile in SelectedFiles_train]
xtra_in = set([selectedfile["xtra_in"] for selectedfile in SelectedFiles_train])
if len(xtra_in)>1:# False and True is present. Not supported
print("Xtra data is used only for some files. Xtra data needs to be used either by all or by none!")
return
xtra_in = list(xtra_in)[0]#this is either True or False
#read self.ram into a new variable; next, clear the RAM. This is required for multitasking (training multiple models, possibly with different data)
DATA = self.ram
if verbose==1:
print("Length of DATA (in RAM) = "+str(len(DATA)))
#clear the ram again if desired
if not self.actionKeep_Data_in_RAM.isChecked():
self.ram = dict()
print("Removed data from self.ram. For further training sessions, data has to be reloaded.")
#If the scaling method is "divide by mean and std of the whole training set":
if norm == "StdScaling using mean and std of all training data":
mean_trainingdata,std_trainingdata = [],[]
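#Descriptive note: the global mean/std are approximated by averaging the per-file means and stds
#rather than computing pooled statistics over all training images; presumably a deliberate
#simplification that is accurate enough for normalization.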
for i in range(len(SelectedFiles_train)):
#if Data_to_RAM was not enabled:
#if not self.actionDataToRam.isChecked():
if len(DATA)==0: #Here, the entire training set needs to be used! Not only random images!
#Replace=True means that individual cells could occur several times
gen_train = aid_img.gen_crop_img(crop,rtdc_path_train[i],random_images=False,zoom_factor=zoom_factors_train[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode)
# else: #get a similar generator, using the ram-data
# if len(DATA)==0:
# gen_train = aid_img.gen_crop_img(crop,rtdc_path_train[i],random_images=False) #Replace true means that individual cells could occur several times
else:
gen_train = aid_img.gen_crop_img_ram(DATA,rtdc_path_train[i],random_images=False) #Replace true means that individual cells could occur several times
if self.actionVerbose.isChecked():
print("Loaded data from RAM")
images = next(gen_train)[0]
mean_trainingdata.append(np.mean(images))
std_trainingdata.append(np.std(images))
mean_trainingdata = np.mean(np.array(mean_trainingdata))
std_trainingdata = np.mean(np.array(std_trainingdata))
if np.allclose(std_trainingdata,0):
std_trainingdata = 0.0001
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
text = "<html><head/><body><p>The standard deviation of your training data is zero! This would lead to division by zero. To avoid this, I will divide by 0.0001 instead.</p></body></html>"
msg.setText(text)
msg.setWindowTitle("Std. is zero")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
Para_dict = pd.DataFrame()
def update_para_dict():
#Document changes in the meta-file
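#Descriptive note: the trailing commas below wrap each value in a 1-tuple, which pandas interprets
#as a length-1 column; presumably this is how Para_dict ends up as a single-row DataFrame that can
#be appended to the 'Parameters' sheet each time the settings change.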
Para_dict["AIDeveloper_Version"]=VERSION,
Para_dict["model_zoo_version"]=model_zoo_version,
try:
Para_dict["OS"]=platform.platform(),
Para_dict["CPU"]=platform.processor(),
except:
Para_dict["OS"]="Unknown",
Para_dict["CPU"]="Unknown",
Para_dict["Modelname"]=new_modelname,
Para_dict["Chosen Model"]=chosen_model,
Para_dict["new_model"]=new_model,
Para_dict["loadrestart_model"]=loadrestart_model,
Para_dict["loadcontinue_model"]=loadcontinue_model,
Para_dict["Continued_Fitting_From"]=load_modelname,
Para_dict["Input image size"]=crop,
Para_dict["Color Mode"]=color_mode,
Para_dict["Zoom order"]=zoom_order,
Para_dict["Device"]=deviceSelected,
Para_dict["gpu_used"]=gpu_used,
Para_dict["gpu_memory"]=gpu_memory,
Para_dict["Output Nr. classes"]=nr_classes,
Para_dict["Normalization"]=norm,
Para_dict["Nr. epochs"]=nr_epochs,
Para_dict["Keras refresh after nr. epochs"]=keras_refresh_nr_epochs,
Para_dict["Horz. flip"]=h_flip,
Para_dict["Vert. flip"]=v_flip,
Para_dict["rotation"]=rotation,
Para_dict["width_shift"]=width_shift,
Para_dict["height_shift"]=height_shift,
Para_dict["zoom"]=zoom,
Para_dict["shear"]=shear,
Para_dict["Brightness refresh after nr. epochs"]=brightness_refresh_nr_epochs,
Para_dict["Brightness add. lower"]=brightness_add_lower,
Para_dict["Brightness add. upper"]=brightness_add_upper,
Para_dict["Brightness mult. lower"]=brightness_mult_lower,
Para_dict["Brightness mult. upper"]=brightness_mult_upper,
Para_dict["Gaussnoise Mean"]=gaussnoise_mean,
Para_dict["Gaussnoise Scale"]=gaussnoise_scale,
Para_dict["Contrast on"]=contrast_on,
Para_dict["Contrast Lower"]=contrast_lower,
Para_dict["Contrast Higher"]=contrast_higher,
Para_dict["Saturation on"]=saturation_on,
Para_dict["Saturation Lower"]=saturation_lower,
Para_dict["Saturation Higher"]=saturation_higher,
Para_dict["Hue on"]=hue_on,
Para_dict["Hue delta"]=hue_delta,
Para_dict["Average blur on"]=avgBlur_on,
Para_dict["Average blur Lower"]=avgBlur_min,
Para_dict["Average blur Higher"]=avgBlur_max,
Para_dict["Gauss blur on"]=gaussBlur_on,
Para_dict["Gauss blur Lower"]=gaussBlur_min,
Para_dict["Gauss blur Higher"]=gaussBlur_max,
Para_dict["Motion blur on"]=motionBlur_on,
Para_dict["Motion blur Kernel"]=motionBlur_kernel,
Para_dict["Motion blur Angle"]=motionBlur_angle,
Para_dict["Epoch_Started_Using_These_Settings"]=counter,
Para_dict["expert_mode"]=expert_mode,
Para_dict["batchSize_expert"]=batchSize_expert,
Para_dict["epochs_expert"]=epochs_expert,
Para_dict["learning_rate_expert_on"]=learning_rate_expert_on,
Para_dict["learning_rate_const_on"]=learning_rate_const_on,
Para_dict["learning_rate_const"]=learning_rate_const,
Para_dict["learning_rate_cycLR_on"]=learning_rate_cycLR_on,
Para_dict["cycLrMin"]=cycLrMin,
Para_dict["cycLrMax"]=cycLrMax,
Para_dict["cycLrMethod"] = cycLrMethod,
Para_dict["clr_settings"] = self.fittingpopups_ui[listindex].clr_settings,
Para_dict["learning_rate_expo_on"]=learning_rate_expo_on,
Para_dict["expDecInitLr"]=expDecInitLr,
Para_dict["expDecSteps"]=expDecSteps,
Para_dict["expDecRate"]=expDecRate,
Para_dict["loss_expert_on"]=loss_expert_on,
Para_dict["loss_expert"]=loss_expert,
Para_dict["optimizer_expert_on"]=optimizer_expert_on,
Para_dict["optimizer_expert"]=optimizer_expert,
Para_dict["optimizer_settings"]=optimizer_settings,
Para_dict["paddingMode"]=paddingMode,
Para_dict["train_last_layers"]=train_last_layers,
Para_dict["train_last_layers_n"]=train_last_layers_n,
Para_dict["train_dense_layers"]=train_dense_layers,
Para_dict["dropout_expert_on"]=dropout_expert_on,
Para_dict["dropout_expert"]=dropout_expert,
Para_dict["lossW_expert_on"]=lossW_expert_on,
Para_dict["lossW_expert"]=lossW_expert,
Para_dict["class_weight"]=class_weight,
Para_dict["metrics"]=model_metrics,
#the training data (and hence its mean/std) cannot be changed during training
if norm == "StdScaling using mean and std of all training data":
#This needs to be saved into Para_dict since it will be required for inference
Para_dict["Mean of training data used for scaling"]=mean_trainingdata,
Para_dict["Std of training data used for scaling"]=std_trainingdata,
if collection==False:
if counter == 0:
Para_dict.to_excel(self.fittingpopups_ui[listindex].writer,sheet_name='Parameters')
else:
Para_dict.to_excel(self.fittingpopups_ui[listindex].writer,sheet_name='Parameters',startrow=self.fittingpopups_ui[listindex].writer.sheets['Parameters'].max_row,header= False)
if os.path.isfile(new_modelname.split(".model")[0]+'_meta.xlsx'):
os.chmod(new_modelname.split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH|S_IWRITE|S_IWGRP|S_IWOTH)#change to read/write
try:
self.fittingpopups_ui[listindex].writer.save()
except:
pass
os.chmod(new_modelname.split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH)#change to only readable
if collection==True:
for i in range(len(Writers)):
Para_dict["Chosen Model"]=model_architecture_names[i],
writer = Writers[i]
if counter==0:
Para_dict.to_excel(Writers[i],sheet_name='Parameters')
else:
Para_dict.to_excel(writer,sheet_name='Parameters',startrow=writer.sheets['Parameters'].max_row,header= False)
if os.path.isfile(model_keras_path[i].split(".model")[0]+'_meta.xlsx'):
os.chmod(model_keras_path[i].split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH|S_IWRITE|S_IWGRP|S_IWOTH) #read/write
try:
writer.save()
except:
pass
os.chmod(model_keras_path[i].split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH) #read only
######################Load the Validation Data################################
ind = [selectedfile["TrainOrValid"] == "Valid" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
SelectedFiles_valid = np.array(SelectedFiles)[ind]
SelectedFiles_valid = list(SelectedFiles_valid)
indices_valid = [selectedfile["class"] for selectedfile in SelectedFiles_valid]
nr_events_epoch_valid = [selectedfile["nr_events_epoch"] for selectedfile in SelectedFiles_valid]
rtdc_path_valid = [selectedfile["rtdc_path"] for selectedfile in SelectedFiles_valid]
zoom_factors_valid = [selectedfile["zoom_factor"] for selectedfile in SelectedFiles_valid]
#zoom_order = [self.actionOrder0.isChecked(),self.actionOrder1.isChecked(),self.actionOrder2.isChecked(),self.actionOrder3.isChecked(),self.actionOrder4.isChecked(),self.actionOrder5.isChecked()]
#zoom_order = int(np.where(np.array(zoom_order)==True)[0])
zoom_order = int(self.comboBox_zoomOrder.currentIndex()) #the combobox-index is already the zoom order
shuffle_valid = [selectedfile["shuffle"] for selectedfile in SelectedFiles_valid]
xtra_in = set([selectedfile["xtra_in"] for selectedfile in SelectedFiles_valid])
if len(xtra_in)>1:# False and True is present. Not supported
print("Xtra data is used only for some files. Xtra data needs to be used either by all or by none!")
return
xtra_in = list(xtra_in)[0]#this is either True or False
############Cropping#####################
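#Descriptive note: each generator yields a tuple; based on the indexing below, element 0 holds the
#cropped images, element 1 the indices of the used events and element 2 the xtra data (if any).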
X_valid,y_valid,Indices,xtra_valid = [],[],[],[]
for i in range(len(SelectedFiles_valid)):
if not self.actionDataToRam.isChecked():
#Replace=true means individual cells could occur several times
gen_valid = aid_img.gen_crop_img(crop,rtdc_path_valid[i],nr_events_epoch_valid[i],random_images=shuffle_valid[i],replace=True,zoom_factor=zoom_factors_valid[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode,xtra_in=xtra_in)
else: #get a similar generator, using the ram-data
if len(DATA)==0:
#Replace=true means individual cells could occur several times
gen_valid = aid_img.gen_crop_img(crop,rtdc_path_valid[i],nr_events_epoch_valid[i],random_images=shuffle_valid[i],replace=True,zoom_factor=zoom_factors_valid[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode,xtra_in=xtra_in)
else:
gen_valid = aid_img.gen_crop_img_ram(DATA,rtdc_path_valid[i],nr_events_epoch_valid[i],random_images=shuffle_valid[i],replace=True,xtra_in=xtra_in) #Replace true means that individual cells could occur several times
if self.actionVerbose.isChecked():
print("Loaded data from RAM")
generator_cropped_out = next(gen_valid)
X_valid.append(generator_cropped_out[0])
#y_valid.append(np.repeat(indices_valid[i],nr_events_epoch_valid[i]))
y_valid.append(np.repeat(indices_valid[i],X_valid[-1].shape[0]))
Indices.append(generator_cropped_out[1])
xtra_valid.append(generator_cropped_out[2])
del generator_cropped_out
#Save the validation set (BEFORE normalization!)
#Write to.rtdc files
if bool(self.actionExport_Original.isChecked())==True:
print("Export original images")
save_cropped = False
aid_bin.write_rtdc(new_modelname.split(".model")[0]+'_Valid_Data.rtdc',rtdc_path_valid,X_valid,Indices,cropped=save_cropped,color_mode=self.get_color_mode(),xtra_in=xtra_valid)
elif bool(self.actionExport_Cropped.isChecked())==True:
print("Export cropped images")
save_cropped = True
aid_bin.write_rtdc(new_modelname.split(".model")[0]+'_Valid_Data.rtdc',rtdc_path_valid,X_valid,Indices,cropped=save_cropped,color_mode=self.get_color_mode(),xtra_in=xtra_valid)
elif bool(self.actionExport_Off.isChecked())==True:
print("Exporting is turned off")
# msg = QtWidgets.QMessageBox()
# msg.setIcon(QtWidgets.QMessageBox.Information)
# msg.setText("Use a different Exporting option in ->Edit if you want to export the data")
# msg.setWindowTitle("Export is turned off!")
# msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
# msg.exec_()
X_valid = np.concatenate(X_valid)
y_valid = np.concatenate(y_valid)
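#One-hot encode the integer labels; e.g. with nr_classes=4, a label of 2 becomes [0,0,1,0]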
Y_valid = np_utils.to_categorical(y_valid, nr_classes)# * 2 - 1
xtra_valid = np.concatenate(xtra_valid)
if not bool(self.actionExport_Off.isChecked())==True:
#Save the labels
np.savetxt(new_modelname.split(".model")[0]+'_Valid_Labels.txt',y_valid.astype(int),fmt='%i')
if len(X_valid.shape)==4:
channels=3
elif len(X_valid.shape)==3:
channels=1
else:
print("Invalid data dimension:" +str(X_valid.shape))
if channels==1:
#Add the "channels" dimension
X_valid = np.expand_dims(X_valid,3)
#get it to theano image format (channels first)
#X_valid = X_valid.swapaxes(-1,-2).swapaxes(-2,-3)
if norm == "StdScaling using mean and std of all training data":
X_valid = aid_img.image_normalization(X_valid,norm,mean_trainingdata,std_trainingdata)
else:
X_valid = aid_img.image_normalization(X_valid,norm)
#Validation data can be cropped to final size already since no augmentation
#will happen on this data set
dim_val = X_valid.shape
print("Current dim. of validation set (pixels x pixels) = "+str(dim_val[2]))
if dim_val[2]!=crop:
print("Change dim. (pixels x pixels) of validation set to = "+str(crop))
remove = int(dim_val[2]/2.0 - crop/2.0)
X_valid = X_valid[:,remove:remove+crop,remove:remove+crop,:] #crop to crop x crop pixels #TensorFlow
if xtra_in==True:
print("Add Xtra Data to X_valid")
X_valid = [X_valid,xtra_valid]
####################Update the PopupFitting########################
self.fittingpopups_ui[listindex].lineEdit_modelname_pop.setText(new_modelname) #show the model name in the popup
self.fittingpopups_ui[listindex].spinBox_imagecrop_pop.setValue(crop)
self.fittingpopups_ui[listindex].spinBox_NrEpochs.setValue(nr_epochs)
self.fittingpopups_ui[listindex].comboBox_ModelSelection_pop.addItems(self.predefined_models)
chosen_model = str(self.comboBox_ModelSelection.currentText())
index = self.fittingpopups_ui[listindex].comboBox_ModelSelection_pop.findText(chosen_model, QtCore.Qt.MatchFixedString)
if index >= 0:
self.fittingpopups_ui[listindex].comboBox_ModelSelection_pop.setCurrentIndex(index)
self.fittingpopups_ui[listindex].comboBox_Normalization_pop.addItems(self.norm_methods)
index = self.fittingpopups_ui[listindex].comboBox_Normalization_pop.findText(norm, QtCore.Qt.MatchFixedString)
if index >= 0:
self.fittingpopups_ui[listindex].comboBox_Normalization_pop.setCurrentIndex(index)
#padding
index = self.fittingpopups_ui[listindex].comboBox_paddingMode_pop.findText(paddingMode, QtCore.Qt.MatchFixedString)
if index >= 0:
self.fittingpopups_ui[listindex].comboBox_paddingMode_pop.setCurrentIndex(index)
#zoom_order
self.fittingpopups_ui[listindex].comboBox_zoomOrder.setCurrentIndex(zoom_order)
#CPU setting
self.fittingpopups_ui[listindex].comboBox_cpu_pop.addItem("Default CPU")
if gpu_used==False:
self.fittingpopups_ui[listindex].radioButton_cpu_pop.setChecked(True)
self.fittingpopups_ui[listindex].doubleSpinBox_memory_pop.setValue(gpu_memory)
#GPU setting
if gpu_used==True:
self.fittingpopups_ui[listindex].radioButton_gpu_pop.setChecked(True)
self.fittingpopups_ui[listindex].comboBox_gpu_pop.addItem(deviceSelected)
self.fittingpopups_ui[listindex].doubleSpinBox_memory_pop.setValue(gpu_memory)
self.fittingpopups_ui[listindex].spinBox_RefreshAfterEpochs_pop.setValue(keras_refresh_nr_epochs)
self.fittingpopups_ui[listindex].checkBox_HorizFlip_pop.setChecked(h_flip)
self.fittingpopups_ui[listindex].checkBox_VertFlip_pop.setChecked(v_flip)
self.fittingpopups_ui[listindex].lineEdit_Rotation_pop.setText(str(rotation))
self.fittingpopups_ui[listindex].lineEdit_widthShift_pop.setText(str(width_shift))
self.fittingpopups_ui[listindex].lineEdit_heightShift_pop.setText(str(height_shift))
self.fittingpopups_ui[listindex].lineEdit_zoomRange_pop.setText(str(zoom))
self.fittingpopups_ui[listindex].lineEdit_shearRange_pop.setText(str(shear))
self.fittingpopups_ui[listindex].spinBox_RefreshAfterNrEpochs_pop.setValue(brightness_refresh_nr_epochs)
self.fittingpopups_ui[listindex].spinBox_PlusLower_pop.setValue(brightness_add_lower)
self.fittingpopups_ui[listindex].spinBox_PlusUpper_pop.setValue(brightness_add_upper)
self.fittingpopups_ui[listindex].doubleSpinBox_MultLower_pop.setValue(brightness_mult_lower)
self.fittingpopups_ui[listindex].doubleSpinBox_MultUpper_pop.setValue(brightness_mult_upper)
self.fittingpopups_ui[listindex].doubleSpinBox_GaussianNoiseMean_pop.setValue(gaussnoise_mean)
self.fittingpopups_ui[listindex].doubleSpinBox_GaussianNoiseScale_pop.setValue(gaussnoise_scale)
self.fittingpopups_ui[listindex].checkBox_contrast_pop.setChecked(contrast_on)
self.fittingpopups_ui[listindex].doubleSpinBox_contrastLower_pop.setValue(contrast_lower)
self.fittingpopups_ui[listindex].doubleSpinBox_contrastHigher_pop.setValue(contrast_higher)
self.fittingpopups_ui[listindex].checkBox_saturation_pop.setChecked(saturation_on)
self.fittingpopups_ui[listindex].doubleSpinBox_saturationLower_pop.setValue(saturation_lower)
self.fittingpopups_ui[listindex].doubleSpinBox_saturationHigher_pop.setValue(saturation_higher)
self.fittingpopups_ui[listindex].checkBox_hue_pop.setChecked(hue_on)
self.fittingpopups_ui[listindex].doubleSpinBox_hueDelta_pop.setValue(hue_delta)
#Special for saturation and hue. Only enabled for RGB:
saturation_enabled = bool(self.checkBox_saturation.isEnabled())
self.fittingpopups_ui[listindex].checkBox_saturation_pop.setEnabled(saturation_enabled)
self.fittingpopups_ui[listindex].doubleSpinBox_saturationLower_pop.setEnabled(saturation_enabled)
self.fittingpopups_ui[listindex].doubleSpinBox_saturationHigher_pop.setEnabled(saturation_enabled)
hue_enabled = bool(self.checkBox_hue.isEnabled())
self.fittingpopups_ui[listindex].checkBox_hue_pop.setEnabled(hue_enabled)
self.fittingpopups_ui[listindex].doubleSpinBox_hueDelta_pop.setEnabled(hue_enabled)
self.fittingpopups_ui[listindex].checkBox_avgBlur_pop.setChecked(avgBlur_on)
self.fittingpopups_ui[listindex].spinBox_avgBlurMin_pop.setEnabled(avgBlur_on)
self.fittingpopups_ui[listindex].label_avgBlurMin_pop.setEnabled(avgBlur_on)
self.fittingpopups_ui[listindex].spinBox_avgBlurMin_pop.setValue(avgBlur_min)
self.fittingpopups_ui[listindex].spinBox_avgBlurMax_pop.setEnabled(avgBlur_on)
self.fittingpopups_ui[listindex].label_avgBlurMax_pop.setEnabled(avgBlur_on)
self.fittingpopups_ui[listindex].spinBox_avgBlurMax_pop.setValue(avgBlur_max)
self.fittingpopups_ui[listindex].checkBox_gaussBlur_pop.setChecked(gaussBlur_on)
self.fittingpopups_ui[listindex].spinBox_gaussBlurMin_pop.setEnabled(gaussBlur_on)
self.fittingpopups_ui[listindex].label_gaussBlurMin_pop.setEnabled(gaussBlur_on)
self.fittingpopups_ui[listindex].spinBox_gaussBlurMin_pop.setValue(gaussBlur_min)
self.fittingpopups_ui[listindex].spinBox_gaussBlurMax_pop.setEnabled(gaussBlur_on)
self.fittingpopups_ui[listindex].label_gaussBlurMax_pop.setEnabled(gaussBlur_on)
self.fittingpopups_ui[listindex].spinBox_gaussBlurMax_pop.setValue(gaussBlur_max)
self.fittingpopups_ui[listindex].checkBox_motionBlur_pop.setChecked(motionBlur_on)
self.fittingpopups_ui[listindex].label_motionBlurKernel_pop.setEnabled(motionBlur_on)
self.fittingpopups_ui[listindex].lineEdit_motionBlurKernel_pop.setEnabled(motionBlur_on)
self.fittingpopups_ui[listindex].label_motionBlurAngle_pop.setEnabled(motionBlur_on)
self.fittingpopups_ui[listindex].lineEdit_motionBlurAngle_pop.setEnabled(motionBlur_on)
if len(motionBlur_kernel)==1:
self.fittingpopups_ui[listindex].lineEdit_motionBlurKernel_pop.setText(str(motionBlur_kernel[0]))
if len(motionBlur_kernel)==2:
self.fittingpopups_ui[listindex].lineEdit_motionBlurKernel_pop.setText(str(motionBlur_kernel[0])+","+str(motionBlur_kernel[1]))
if len(motionBlur_angle)==1:
self.fittingpopups_ui[listindex].lineEdit_motionBlurAngle_pop.setText(str(motionBlur_angle[0]))
if len(motionBlur_angle)==2:
self.fittingpopups_ui[listindex].lineEdit_motionBlurAngle_pop.setText(str(motionBlur_angle[0])+","+str(motionBlur_angle[1]))
self.fittingpopups_ui[listindex].groupBox_expertMode_pop.setChecked(expert_mode)
self.fittingpopups_ui[listindex].spinBox_batchSize.setValue(batchSize_expert)
self.fittingpopups_ui[listindex].spinBox_epochs.setValue(epochs_expert)
self.fittingpopups_ui[listindex].groupBox_learningRate_pop.setChecked(learning_rate_expert_on)
self.fittingpopups_ui[listindex].radioButton_LrConst.setChecked(learning_rate_const_on)
self.fittingpopups_ui[listindex].doubleSpinBox_learningRate.setValue(learning_rate_const)
self.fittingpopups_ui[listindex].radioButton_LrCycl.setChecked(learning_rate_cycLR_on)
self.fittingpopups_ui[listindex].lineEdit_cycLrMin.setText(str(cycLrMin))
self.fittingpopups_ui[listindex].lineEdit_cycLrMax.setText(str(cycLrMax))
index = self.fittingpopups_ui[listindex].comboBox_cycLrMethod.findText(cycLrMethod, QtCore.Qt.MatchFixedString)
if index >= 0:
self.fittingpopups_ui[listindex].comboBox_cycLrMethod.setCurrentIndex(index)
self.fittingpopups_ui[listindex].radioButton_LrExpo.setChecked(learning_rate_expo_on)
self.fittingpopups_ui[listindex].doubleSpinBox_expDecInitLr.setValue(expDecInitLr)
self.fittingpopups_ui[listindex].spinBox_expDecSteps.setValue(expDecSteps)
self.fittingpopups_ui[listindex].doubleSpinBox_expDecRate.setValue(expDecRate)
self.fittingpopups_ui[listindex].checkBox_expt_loss_pop.setChecked(loss_expert_on)
index = self.fittingpopups_ui[listindex].comboBox_expt_loss_pop.findText(loss_expert, QtCore.Qt.MatchFixedString)
if index >= 0:
self.fittingpopups_ui[listindex].comboBox_expt_loss_pop.setCurrentIndex(index)
self.fittingpopups_ui[listindex].checkBox_optimizer_pop.setChecked(optimizer_expert_on)
index = self.fittingpopups_ui[listindex].comboBox_optimizer.findText(optimizer_expert, QtCore.Qt.MatchFixedString)
if index >= 0:
self.fittingpopups_ui[listindex].comboBox_optimizer.setCurrentIndex(index)
self.fittingpopups_ui[listindex].doubleSpinBox_learningRate.setValue(learning_rate_const)
index = self.fittingpopups_ui[listindex].comboBox_paddingMode_pop.findText(paddingMode, QtCore.Qt.MatchFixedString)
if index >= 0:
self.fittingpopups_ui[listindex].comboBox_paddingMode_pop.setCurrentIndex(index)
self.fittingpopups_ui[listindex].checkBox_trainLastNOnly_pop.setChecked(train_last_layers)
self.fittingpopups_ui[listindex].spinBox_trainLastNOnly_pop.setValue(train_last_layers_n)
self.fittingpopups_ui[listindex].checkBox_trainDenseOnly_pop.setChecked(train_dense_layers)
self.fittingpopups_ui[listindex].checkBox_dropout_pop.setChecked(dropout_expert_on)
do_text = [str(do_i) for do_i in dropout_expert]
self.fittingpopups_ui[listindex].lineEdit_dropout_pop.setText((', '.join(do_text)))
self.fittingpopups_ui[listindex].checkBox_lossW.setChecked(lossW_expert_on)
self.fittingpopups_ui[listindex].pushButton_lossW.setEnabled(lossW_expert_on)
self.fittingpopups_ui[listindex].lineEdit_lossW.setText(str(lossW_expert))
if channels==1:
channel_text = "Grayscale"
elif channels==3:
channel_text = "RGB"
self.fittingpopups_ui[listindex].comboBox_colorMode_pop.addItems([channel_text])
###############Continue with training data:augmentation############
#Rotating could create edge effects. Avoid this by making crop a bit larger for now
#Worst case would be a 45-degree rotation:
cropsize2 = np.sqrt(crop**2+crop**2)
cropsize2 = np.ceil(cropsize2 / 2.) * 2 #round to the next even number
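#Example (hypothetical numbers): crop=32 -> sqrt(32**2+32**2) ~ 45.25 -> rounded up to the next
#even number gives cropsize2=46; the batch is cropped back to crop x crop after augmentation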
#Dictionary defining affine image augmentation options:
aug_paras = {"v_flip":v_flip,"h_flip":h_flip,"rotation":rotation,"width_shift":width_shift,"height_shift":height_shift,"zoom":zoom,"shear":shear}
Histories,Index,Saved,Stopwatch,LearningRate = [],[],[],[],[]
if collection==True:
HISTORIES = [ [] for model in model_keras]
SAVED = [ [] for model in model_keras]
counter = 0
saving_failed = False #when saving fails, this becomes true and the user will be informed at the end of training
#Save the initial values (Epoch 1)
update_para_dict()
model_metrics_names = []
for met in model_metrics:
if type(met)==str:
model_metrics_names.append(met)
else:
metname = met.name
metlabel = met.label
if metlabel>0:
metname = metname+"_"+str(metlabel)
model_metrics_names.append(metname)
#Dictionary for records in metrics
model_metrics_records = {}
model_metrics_records["acc"] = 0 #accuracy starts at zero and approaches 1 during training
model_metrics_records["val_acc"] = 0 #accuracy starts at zero and approaches 1 during training
model_metrics_records["loss"] = 9E20 ##loss starts very high and approaches 0 during training
model_metrics_records["val_loss"] = 9E20 ##loss starts very high and approaches 0 during training
for key in model_metrics_names:
if 'precision' in key or 'recall' in key or 'f1_score' in key:
model_metrics_records[key] = 0 #those metrics start at zero and approach 1
model_metrics_records["val_"+key] = 0 #those metrics start at zero and approach 1
gen_train_refresh = False
time_start = time.time()
t1 = time.time() #Initialize a timer; this is used to save the meta file every few seconds
t2 = time.time() #Initialize a timer; this is used to update the fitting parameters
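#Descriptive note on the loop structure below: while counter < nr_epochs, images are (re)loaded and
#affine-augmented once per pass of the outer loop; the two nested loops (controlled by
#keras_refresh_nr_epochs and brightness_refresh_nr_epochs) reuse X_batch_orig and only re-apply the
#cheaper brightness/noise/blur augmentation before each call to model.fit.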
while counter < nr_epochs:#nr_epochs: #resample nr_epochs times
#Only keep fitting if the respective window is open:
isVisible = self.fittingpopups[listindex].isVisible()
if isVisible:
############Keras image augmentation#####################
#Start the first iteration:
X_train,y_train,xtra_train = [],[],[]
t3 = time.time()
for i in range(len(SelectedFiles_train)):
if len(DATA)==0 or gen_train_refresh:
#Replace true means that individual cells could occur several times
gen_train = aid_img.gen_crop_img(cropsize2,rtdc_path_train[i],nr_events_epoch_train[i],random_images=shuffle_train[i],replace=True,zoom_factor=zoom_factors_train[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode,xtra_in=xtra_in)
gen_train_refresh = False
else:
gen_train = aid_img.gen_crop_img_ram(DATA,rtdc_path_train[i],nr_events_epoch_train[i],random_images=shuffle_train[i],replace=True,xtra_in=xtra_in) #Replace true means that individual cells could occur several times
if self.actionVerbose.isChecked():
print("Loaded data from RAM")
data_ = next(gen_train)
X_train.append(data_[0])
y_train.append(np.repeat(indices_train[i],X_train[-1].shape[0]))
if xtra_in==True:
xtra_train.append(data_[2])
del data_
X_train = np.concatenate(X_train)
X_train = X_train.astype(np.uint8)
y_train = np.concatenate(y_train)
if xtra_in==True:
print("Retrieve Xtra Data...")
xtra_train = np.concatenate(xtra_train)
t4 = time.time()
if verbose == 1:
print("Time to load data (from .rtdc or RAM) and crop="+str(t4-t3))
if len(X_train.shape)==4:
channels=3
elif len(X_train.shape)==3:
channels=1
else:
print("Invalid data dimension:" +str(X_train.shape))
if channels==1:
#Add the "channels" dimension
X_train = np.expand_dims(X_train,3)
t3 = time.time()
#Some parallelization: use nr_threads (number of CPUs)
nr_threads = 1 #Somehow for MNIST and CIFAR, processing always took longer for nr_threads>1. I tried nr_threads=2,4,8,16,24
if nr_threads == 1:
X_batch = aid_img.affine_augm(X_train,v_flip,h_flip,rotation,width_shift,height_shift,zoom,shear) #Affine image augmentation
y_batch = np.copy(y_train)
else:
#Divide the data into nr_threads batches
X_train = np.array_split(X_train,nr_threads)
y_train = np.array_split(y_train,nr_threads)
self.X_batch = [False] * nr_threads
self.y_batch = [False] * nr_threads
self.counter_aug = 0
self.Workers_augm = []
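#Each worker augments its own slice of X_train and stores the result in self.X_batch[i]/self.y_batch[i];
#self.counter_aug acts as a simple completion barrier that the main thread polls below.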
def imgaug_worker(aug_paras,progress_callback,history_callback):
i = aug_paras["i"]
self.X_batch[i] = aid_img.affine_augm(aug_paras["X_train"],v_flip,h_flip,rotation,width_shift,height_shift,zoom,shear)
self.y_batch[i] = aug_paras["y_train"]
self.counter_aug+=1
t3_a = time.time()
for i in range(nr_threads):
aug_paras_ = copy.deepcopy(aug_paras)
aug_paras_["i"] = i
aug_paras_["X_train"]=X_train[i]#augparas contains rotation and so on. X_train and y_train are overwritten in each iteration (for each worker new X_train)
aug_paras_["y_train"]=y_train[i]
self.Workers_augm.append(Worker(imgaug_worker,aug_paras_))
self.threadpool.start(self.Workers_augm[i])
while self.counter_aug < nr_threads:
time.sleep(0.01)#Wait 0.01s, then check the counter again
t3_b = time.time()
if verbose == 1:
print("Time to perform affine augmentation_internal ="+str(t3_b-t3_a))
X_batch = np.concatenate(self.X_batch)
y_batch = np.concatenate(self.y_batch)
Y_batch = np_utils.to_categorical(y_batch, nr_classes)# * 2 - 1
t4 = time.time()
if verbose == 1:
print("Time to perform affine augmentation ="+str(t4-t3))
t3 = time.time()
#Now do the final cropping to the actual size that was set by user
dim = X_batch.shape
if dim[2]!=crop:
remove = int(dim[2]/2.0 - crop/2.0)
X_batch = X_batch[:,remove:remove+crop,remove:remove+crop,:] #crop to crop x crop pixels #TensorFlow
t4 = time.time()
# if verbose == 1:
# print("Time to crop to final size="+str(t4-t3))
X_batch_orig = np.copy(X_batch) #save into new array and do some iterations with varying noise/brightness
#reuse this X_batch_orig a few times since this augmentation was costly
keras_iter_counter = 0
while keras_iter_counter < keras_refresh_nr_epochs and counter < nr_epochs:
keras_iter_counter+=1
#if t2-t1>5: #check for changed settings every 5 seconds
if self.actionVerbose.isChecked()==True:
verbose = 1
else:
verbose = 0
#Another while loop if the user wants to reuse the keras-augmented data
#several times and only apply brightness augmentation:
brightness_iter_counter = 0
while brightness_iter_counter < brightness_refresh_nr_epochs and counter < nr_epochs:
#In each iteration, start with non-augmented data
X_batch = np.copy(X_batch_orig)#copy from X_batch_orig, X_batch will be altered without altering X_batch_orig
X_batch = X_batch.astype(np.uint8)
#########X_batch = X_batch.astype(float)########## No float yet :) !!!
brightness_iter_counter += 1
if self.actionVerbose.isChecked()==True:
verbose = 1
else:
verbose = 0
if self.fittingpopups_ui[listindex].checkBox_ApplyNextEpoch.isChecked():
nr_epochs = int(self.fittingpopups_ui[listindex].spinBox_NrEpochs.value())
#Keras stuff
keras_refresh_nr_epochs = int(self.fittingpopups_ui[listindex].spinBox_RefreshAfterEpochs_pop.value())
h_flip = bool(self.fittingpopups_ui[listindex].checkBox_HorizFlip_pop.isChecked())
v_flip = bool(self.fittingpopups_ui[listindex].checkBox_VertFlip_pop.isChecked())
rotation = float(self.fittingpopups_ui[listindex].lineEdit_Rotation_pop.text())
width_shift = float(self.fittingpopups_ui[listindex].lineEdit_widthShift_pop.text())
height_shift = float(self.fittingpopups_ui[listindex].lineEdit_heightShift_pop.text())
zoom = float(self.fittingpopups_ui[listindex].lineEdit_zoomRange_pop.text())
shear = float(self.fittingpopups_ui[listindex].lineEdit_shearRange_pop.text())
#Brightness stuff
brightness_refresh_nr_epochs = int(self.fittingpopups_ui[listindex].spinBox_RefreshAfterNrEpochs_pop.value())
brightness_add_lower = float(self.fittingpopups_ui[listindex].spinBox_PlusLower_pop.value())
brightness_add_upper = float(self.fittingpopups_ui[listindex].spinBox_PlusUpper_pop.value())
brightness_mult_lower = float(self.fittingpopups_ui[listindex].doubleSpinBox_MultLower_pop.value())
brightness_mult_upper = float(self.fittingpopups_ui[listindex].doubleSpinBox_MultUpper_pop.value())
gaussnoise_mean = float(self.fittingpopups_ui[listindex].doubleSpinBox_GaussianNoiseMean_pop.value())
gaussnoise_scale = float(self.fittingpopups_ui[listindex].doubleSpinBox_GaussianNoiseScale_pop.value())
contrast_on = bool(self.fittingpopups_ui[listindex].checkBox_contrast_pop.isChecked())
contrast_lower = float(self.fittingpopups_ui[listindex].doubleSpinBox_contrastLower_pop.value())
contrast_higher = float(self.fittingpopups_ui[listindex].doubleSpinBox_contrastHigher_pop.value())
saturation_on = bool(self.fittingpopups_ui[listindex].checkBox_saturation_pop.isChecked())
saturation_lower = float(self.fittingpopups_ui[listindex].doubleSpinBox_saturationLower_pop.value())
saturation_higher = float(self.fittingpopups_ui[listindex].doubleSpinBox_saturationHigher_pop.value())
hue_on = bool(self.fittingpopups_ui[listindex].checkBox_hue_pop.isChecked())
hue_delta = float(self.fittingpopups_ui[listindex].doubleSpinBox_hueDelta_pop.value())
avgBlur_on = bool(self.fittingpopups_ui[listindex].checkBox_avgBlur_pop.isChecked())
avgBlur_min = int(self.fittingpopups_ui[listindex].spinBox_avgBlurMin_pop.value())
avgBlur_max = int(self.fittingpopups_ui[listindex].spinBox_avgBlurMax_pop.value())
gaussBlur_on = bool(self.fittingpopups_ui[listindex].checkBox_gaussBlur_pop.isChecked())
gaussBlur_min = int(self.fittingpopups_ui[listindex].spinBox_gaussBlurMin_pop.value())
gaussBlur_max = int(self.fittingpopups_ui[listindex].spinBox_gaussBlurMax_pop.value())
motionBlur_on = bool(self.fittingpopups_ui[listindex].checkBox_motionBlur_pop.isChecked())
motionBlur_kernel = str(self.fittingpopups_ui[listindex].lineEdit_motionBlurKernel_pop.text())
motionBlur_angle = str(self.fittingpopups_ui[listindex].lineEdit_motionBlurAngle_pop.text())
motionBlur_kernel = tuple(ast.literal_eval(motionBlur_kernel)) #translate string in the lineEdits to a tuple
motionBlur_angle = tuple(ast.literal_eval(motionBlur_angle)) #translate string in the lineEdits to a tuple
#Expert mode stuff
expert_mode = bool(self.fittingpopups_ui[listindex].groupBox_expertMode_pop.isChecked())
batchSize_expert = int(self.fittingpopups_ui[listindex].spinBox_batchSize.value())
epochs_expert = int(self.fittingpopups_ui[listindex].spinBox_epochs.value())
learning_rate_expert_on = bool(self.fittingpopups_ui[listindex].groupBox_learningRate_pop.isChecked())
learning_rate_const_on = bool(self.fittingpopups_ui[listindex].radioButton_LrConst.isChecked())
learning_rate_const = float(self.fittingpopups_ui[listindex].doubleSpinBox_learningRate.value())
learning_rate_cycLR_on = bool(self.fittingpopups_ui[listindex].radioButton_LrCycl.isChecked())
try:
cycLrMin = float(self.fittingpopups_ui[listindex].lineEdit_cycLrMin.text())
cycLrMax = float(self.fittingpopups_ui[listindex].lineEdit_cycLrMax.text())
except:
cycLrMin = []
cycLrMax = []
cycLrMethod = str(self.fittingpopups_ui[listindex].comboBox_cycLrMethod.currentText())
clr_settings = self.fittingpopups_ui[listindex].clr_settings.copy() #Get a copy of the current optimizer_settings. .copy prevents that changes in the UI have immediate effect
cycLrStepSize = aid_dl.get_cyclStepSize(SelectedFiles,clr_settings["step_size"],batchSize_expert)
cycLrGamma = clr_settings["gamma"]
learning_rate_expo_on = bool(self.fittingpopups_ui[listindex].radioButton_LrExpo.isChecked())
expDecInitLr = float(self.fittingpopups_ui[listindex].doubleSpinBox_expDecInitLr.value())
expDecSteps = int(self.fittingpopups_ui[listindex].spinBox_expDecSteps.value())
expDecRate = float(self.fittingpopups_ui[listindex].doubleSpinBox_expDecRate.value())
loss_expert_on = bool(self.fittingpopups_ui[listindex].checkBox_expt_loss_pop.isChecked())
loss_expert = str(self.fittingpopups_ui[listindex].comboBox_expt_loss_pop.currentText())
optimizer_expert_on = bool(self.fittingpopups_ui[listindex].checkBox_optimizer_pop.isChecked())
optimizer_expert = str(self.fittingpopups_ui[listindex].comboBox_optimizer.currentText())
optimizer_settings = self.fittingpopups_ui[listindex].optimizer_settings.copy() #Get a copy of the current optimizer_settings. .copy prevents that changes in the UI have immediate effect
paddingMode_ = str(self.fittingpopups_ui[listindex].comboBox_paddingMode_pop.currentText())
print("paddingMode_:"+str(paddingMode_))
if paddingMode_ != paddingMode:
print("Changed the padding mode!")
gen_train_refresh = True#otherwise changing paddingMode will not have any effect
paddingMode = paddingMode_
train_last_layers = bool(self.fittingpopups_ui[listindex].checkBox_trainLastNOnly_pop.isChecked())
train_last_layers_n = int(self.fittingpopups_ui[listindex].spinBox_trainLastNOnly_pop.value())
train_dense_layers = bool(self.fittingpopups_ui[listindex].checkBox_trainDenseOnly_pop.isChecked())
dropout_expert_on = bool(self.fittingpopups_ui[listindex].checkBox_dropout_pop.isChecked())
try:
dropout_expert = str(self.fittingpopups_ui[listindex].lineEdit_dropout_pop.text()) #due to the validator, there are no square brackets
dropout_expert = "["+dropout_expert+"]"
dropout_expert = ast.literal_eval(dropout_expert)
except:
dropout_expert = []
lossW_expert_on = bool(self.fittingpopups_ui[listindex].checkBox_lossW.isChecked())
lossW_expert = str(self.fittingpopups_ui[listindex].lineEdit_lossW.text())
class_weight = self.get_class_weight(self.fittingpopups_ui[listindex].SelectedFiles,lossW_expert) #
print("Updating parameter file (meta.xlsx)!")
update_para_dict()
#Changes in expert mode can affect the model: apply changes now:
if expert_mode==True:
if collection==False: #Expert mode is currently not supported for Collections
expert_mode_before = True
#Apply changes to the trainable states:
if train_last_layers==True:#Train only the last n layers
if verbose:
print("Train only the last "+str(train_last_layers_n)+ " layer(s)")
trainable_new = (len(trainable_original)-train_last_layers_n)*[False]+train_last_layers_n*[True]
#Change the trainability states. Model compilation is done inside model_change_trainability
summary = aid_dl.model_change_trainability(model_keras,trainable_new,model_metrics,nr_classes,loss_expert,optimizer_settings,learning_rate_const)
if model_keras_p!=None:#if this is NOT None, there exists a parallel model, which also needs to be re-compiled
aid_dl.model_compile(model_keras_p,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
print("Recompiled parallel model due to train_last_layers==True")
text1 = "Expert mode: Request for custom trainability states: train only the last "+str(train_last_layers_n)+ " layer(s)\n"
#text2 = "\n--------------------\n"
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text1+summary)
if train_dense_layers==True:#Train only dense layers
if verbose:
print("Train only dense layers")
layer_dense_ind = ["Dense" in x for x in layer_names]
layer_dense_ind = np.where(np.array(layer_dense_ind)==True)[0] #at which indices are Dense layers?
#create a list of trainable states
trainable_new = len(trainable_original)*[False]
for index in layer_dense_ind:
trainable_new[index] = True
#Change the trainability states. Model compilation is done inside model_change_trainability
summary = aid_dl.model_change_trainability(model_keras,trainable_new,model_metrics,nr_classes,loss_expert,optimizer_settings,learning_rate_const)
if model_keras_p!=None:#if this is NOT None, there exists a parallel model, which also needs to be re-compiled
aid_dl.model_compile(model_keras_p,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
print("Recompiled parallel model due to train_dense_layers==True")
text1 = "Expert mode: Request for custom trainability states: train only dense layer(s)\n"
#text2 = "\n--------------------\n"
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text1+summary)
if dropout_expert_on==True:
#The user apparently wants to change the dropout rates
do_list = aid_dl.get_dropout(model_keras)#Get a list of dropout values of the current model
#Compare the dropout values in the model to the dropout values requested by user
if len(dropout_expert)==1:#if the user gave a float
dropout_expert_list = len(do_list)*dropout_expert #convert to list
elif len(dropout_expert)>1:
dropout_expert_list = dropout_expert
if not len(dropout_expert_list)==len(do_list):
text = "Issue with dropout: you defined "+str(len(dropout_expert_list))+" dropout rates, but model has "+str(len(do_list))+" dropout layers"
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
else:
text = "Could not understand user input at Expert->Dropout"
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
dropout_expert_list = []
if len(dropout_expert_list)>0 and do_list!=dropout_expert_list:#if the dropout rates of the current model are not equal to the do_list requested by the user...
#Change dropout. Model .compile happens inside change_dropout function
do_changed = aid_dl.change_dropout(model_keras,dropout_expert_list,model_metrics,nr_classes,loss_expert,optimizer_settings,learning_rate_const)
if model_keras_p!=None:#if model_keras_p is NOT None, there exists a parallel model, which also needs to be re-compiled
aid_dl.model_compile(model_keras_p,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
print("Recompiled parallel model due to changed dropout. I'm not sure if this works already!")
if do_changed==1:
text_do = "Dropout rate(s) in model was/were changed to: "+str(dropout_expert_list)
else:
text_do = "Dropout rate(s) in model was/were not changed"
else:
text_do = "Dropout rate(s) in model was/were not changed"
if verbose:
print(text_do)
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text_do)
if learning_rate_expert_on==True:
#get the current lr_dict
lr_dict_now = aid_dl.get_lr_dict(learning_rate_const_on,learning_rate_const,
learning_rate_cycLR_on,cycLrMin,cycLrMax,
cycLrMethod,cycLrStepSize,
learning_rate_expo_on,
expDecInitLr,expDecSteps,expDecRate,cycLrGamma)
if not lr_dict_now.equals(lr_dict_original):#in case the DataFrames don't match...
#generate a new callback
callback_lr = aid_dl.get_lr_callback(learning_rate_const_on,learning_rate_const,
learning_rate_cycLR_on,cycLrMin,cycLrMax,
cycLrMethod,cycLrStepSize,
learning_rate_expo_on,
expDecInitLr,expDecSteps,expDecRate,cycLrGamma)
#update lr_dict_original
lr_dict_original = lr_dict_now.copy()
else:
callback_lr = None
if optimizer_expert_on==True:
optimizer_settings_now = self.fittingpopups_ui[listindex].optimizer_settings.copy()
if optimizer_settings_now != optimizer_settings:#in case the optimizer settings differ...
#grab these new optimizer values
optimizer_settings = optimizer_settings_now.copy()
############################Invert 'expert' settings#########################
if expert_mode==False and expert_mode_before==True: #if the expert mode was selected before, change the parameters back to original values
if verbose:
print("Expert mode was used before and settings are now inverted")
#Re-set trainable states back to original state
if verbose:
print("Change 'trainable' layers back to original state")
summary = aid_dl.model_change_trainability(model_keras,trainable_original,model_metrics,nr_classes,loss_expert,optimizer_settings,learning_rate_const)
if model_keras_p!=None:#if model_keras_p is NOT None, there exists a parallel model, which also needs to be re-compiled
aid_dl.model_compile(model_keras_p,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
print("Recompiled parallel model to change 'trainable' layers back to original state")
text1 = "Expert mode turns off: Request for orignal trainability states:\n"
#text2 = "\n--------------------\n"
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text1+summary)
if verbose:
print("Change dropout rates in dropout layers back to original values")
callback_lr = None#remove learning rate callback
if verbose:
print("Set learning rate callback to None")
if len(do_list_original)>0:
do_changed = aid_dl.change_dropout(model_keras,do_list_original,model_metrics,nr_classes,loss_expert,optimizer_settings,learning_rate_const)
if model_keras_p!=None:#if model_keras_p is NOT None, there exists a parallel model, which also needs to be re-compiled
aid_dl.model_compile(model_keras_p,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
print("Recompiled parallel model to change dropout values back to original state. I'm not sure if this works!")
if do_changed==1:
text_do = "Dropout rate(s) in model was/were changed to original values: "+str(do_list_original)
else:
text_do = "Dropout rate(s) in model was/were not changed"
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text_do+"\n")
text_updates = ""
#Compare current lr and the lr on expert tab:
if collection==False:
lr_current = K.eval(model_keras.optimizer.lr)
else:
lr_current = K.eval(model_keras[0].optimizer.lr)
lr_diff = learning_rate_const-lr_current
if abs(lr_diff) > 1e-6:
if collection==False:
K.set_value(model_keras.optimizer.lr, learning_rate_const)
else:
K.set_value(model_keras[0].optimizer.lr, learning_rate_const)
text_updates += "Changed the learning rate to "+ str(learning_rate_const)+"\n"
recompile = False
#Compare current optimizer and the optimizer on expert tab:
if collection==False:
optimizer_current = aid_dl.get_optimizer_name(model_keras).lower()#get the current optimizer of the model
else:
optimizer_current = aid_dl.get_optimizer_name(model_keras[0]).lower()#get the current optimizer of the model
if optimizer_current!=optimizer_expert.lower():#if the current model has a different optimizer
recompile = True
text_updates+="Changed the optimizer to "+optimizer_expert+"\n"
#Compare current loss function and the loss-function on expert tab:
if collection==False:
loss_ = model_keras.loss
else:
loss_ = model_keras[0].loss
if loss_!=loss_expert:
recompile = True
model_metrics_records["loss"] = 9E20 #Reset the record for loss because new loss function could converge to a different min. value
model_metrics_records["val_loss"] = 9E20 #Reset the record for loss because new loss function could converge to a different min. value
text_updates+="Changed the loss function to "+loss_expert+"\n"
if recompile==True and collection==False:
print("Recompiling...")
aid_dl.model_compile(model_keras,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
if model_keras_p!=None:#if model_keras_p is NOT None, there exists a parallel model, which also needs to be re-compiled
aid_dl.model_compile(model_keras_p,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
print("Recompiled parallel model to change optimizer, loss and learninig rate.")
elif recompile==True and collection==True:
if model_keras_p!=None:#if model_keras_p is NOT None, there exists a parallel model, which also needs to be re-compiled
print("Altering learning rate is not suported for collections (yet)")
return
print("Recompiling...")
for m in model_keras:
aid_dl.model_compile(m,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text_updates)
#self.model_keras = model_keras #overwrite the model in self
self.fittingpopups_ui[listindex].checkBox_ApplyNextEpoch.setChecked(False)
##########Contrast/Saturation/Hue augmentation#########
#is there any of contrast/saturation/hue augmentation to do?
X_batch = X_batch.astype(np.uint8)
if contrast_on:
t_con_aug_1 = time.time()
X_batch = aid_img.contrast_augm_cv2(X_batch,contrast_lower,contrast_higher) #this function is almost 15 times faster than random_contrast from tf!
t_con_aug_2 = time.time()
if verbose == 1:
print("Time to augment contrast="+str(t_con_aug_2-t_con_aug_1))
if saturation_on or hue_on:
t_sat_aug_1 = time.time()
X_batch = aid_img.satur_hue_augm_cv2(X_batch.astype(np.uint8),saturation_on,saturation_lower,saturation_higher,hue_on,hue_delta) #Gray and RGB; both values >0!
t_sat_aug_2 = time.time()
if verbose == 1:
print("Time to augment saturation/hue="+str(t_sat_aug_2-t_sat_aug_1))
##########Average/Gauss/Motion blurring#########
#is there any of blurring to do?
if avgBlur_on:
t_avgBlur_1 = time.time()
X_batch = aid_img.avg_blur_cv2(X_batch,avgBlur_min,avgBlur_max)
t_avgBlur_2 = time.time()
if verbose == 1:
print("Time to perform average blurring="+str(t_avgBlur_2-t_avgBlur_1))
if gaussBlur_on:
t_gaussBlur_1 = time.time()
X_batch = aid_img.gauss_blur_cv(X_batch,gaussBlur_min,gaussBlur_max)
t_gaussBlur_2 = time.time()
if verbose == 1:
print("Time to perform gaussian blurring="+str(t_gaussBlur_2-t_gaussBlur_1))
if motionBlur_on:
t_motionBlur_1 = time.time()
X_batch = aid_img.motion_blur_cv(X_batch,motionBlur_kernel,motionBlur_angle)
t_motionBlur_2 = time.time()
if verbose == 1:
print("Time to perform motion blurring="+str(t_motionBlur_2-t_motionBlur_1))
##########Brightness noise#########
t3 = time.time()
X_batch = aid_img.brightn_noise_augm_cv2(X_batch,brightness_add_lower,brightness_add_upper,brightness_mult_lower,brightness_mult_upper,gaussnoise_mean,gaussnoise_scale)
t4 = time.time()
if verbose == 1:
print("Time to augment brightness="+str(t4-t3))
t3 = time.time()
if norm == "StdScaling using mean and std of all training data":
X_batch = aid_img.image_normalization(X_batch,norm,mean_trainingdata,std_trainingdata)
else:
X_batch = aid_img.image_normalization(X_batch,norm)
t4 = time.time()
if verbose == 1:
print("Time to apply normalization="+str(t4-t3))
#Fitting can be paused
while str(self.fittingpopups_ui[listindex].pushButton_Pause_pop.text())=="":
time.sleep(2) #wait 2 seconds and then check the text on the button again
if verbose == 1:
print("X_batch.shape")
print(X_batch.shape)
if xtra_in==True:
print("Add Xtra Data to X_batch")
X_batch = [X_batch,xtra_train]
#generate a list of callbacks, get empty list if callback_lr is none
callbacks = []
if callback_lr!=None:
callbacks.append(callback_lr)
###################################################
###############Actual fitting######################
###################################################
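#Note: each pass of the surrounding training loop prepares a freshly augmented X_batch and calls
#.fit() once (epochs_expert Keras epochs on that batch). The resulting history is collected in
#Histories (single model) or HISTORIES[i] (collection) and is later written to the meta .xlsx file.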
if collection==False:
if model_keras_p == None:
history = model_keras.fit(X_batch, Y_batch, batch_size=batchSize_expert, epochs=epochs_expert,verbose=verbose, validation_data=(X_valid, Y_valid),class_weight=class_weight,callbacks=callbacks)
elif model_keras_p != None:
history = model_keras_p.fit(X_batch, Y_batch, batch_size=batchSize_expert, epochs=epochs_expert,verbose=verbose, validation_data=(X_valid, Y_valid),class_weight=class_weight,callbacks=callbacks)
Histories.append(history.history)
Stopwatch.append(time.time()-time_start)
learningrate = K.get_value(history.model.optimizer.lr)
LearningRate.append(learningrate)
#Check if any metric broke a record
record_broken = False #initially, assume there is no new record
for key in history.history.keys():
value = history.history[key][-1]
record = model_metrics_records[key]
if 'val_acc' in key or 'val_precision' in key or 'val_recall' in key or 'val_f1_score' in key:
#These metrics should go up (towards 1)
if value>record:
model_metrics_records[key] = value
record_broken = True
print(key+" broke record -> Model will be saved" )
elif 'val_loss' in key:
#This metric should go down (towards 0)
if value<record:
model_metrics_records[key] = value
record_broken = True
print(key+" broke record -> Model will be saved")
#self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
if record_broken:#if any record was broken...
if deviceSelected=="Multi-GPU":#in case of Multi-GPU...
#In case of multi-GPU, first copy the weights of the parallel model to the normal model
model_keras.set_weights(model_keras_p.layers[-2].get_weights())
#Save the model
text = "Save model to following directory: \n"+os.path.dirname(new_modelname)
print(text)
if os.path.exists(os.path.dirname(new_modelname)):
model_keras.save(new_modelname.split(".model")[0]+"_"+str(counter)+".model")
text = "Record was broken -> saved model"
print(text)
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
else:#in case the folder does not exist (anymore), create a folder in temp
#what is the foldername of the model?
text = "Saving failed. Create folder in temp"
print(text)
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
saving_failed = True
temp_path = aid_bin.create_temp_folder()#create a temp folder if it does not already exist
text = "Your temp. folder is here: "+str(temp_path)
print(text)
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
parentfolder = aid_bin.splitall(new_modelname)[-2]
fname = os.path.split(new_modelname)[-1]
#create that folder in temp if it does not exist already
if not os.path.exists(os.path.join(temp_path,parentfolder)):
text = "Create folder in temp:\n"+os.path.join(temp_path,parentfolder)
print(text)
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
os.mkdir(os.path.join(temp_path,parentfolder))
#change the new_modelname to a path in temp
new_modelname = os.path.join(temp_path,parentfolder,fname)
#inform user!
text = "Could not find original folder. Files are now saved to "+new_modelname
text = "<span style=\' color: red;\'>" +text+"</span>"
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
text = "<span style=\' color: black;\'>" +""+"</span>"
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
#Save the model
model_keras.save(new_modelname.split(".model")[0]+"_"+str(counter)+".model")
text = "Model saved successfully to temp"
print(text)
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
#Also update the excel writer!
writer = pd.ExcelWriter(new_modelname.split(".model")[0]+'_meta.xlsx', engine='openpyxl')
self.fittingpopups_ui[listindex].writer = writer
pd.DataFrame().to_excel(writer,sheet_name='UsedData') #initialize empty Sheet
SelectedFiles_df.to_excel(writer,sheet_name='UsedData')
DataOverview_df.to_excel(writer,sheet_name='DataOverview') #write data overview to separate sheet
pd.DataFrame().to_excel(writer,sheet_name='Parameters') #initialize empty Sheet
pd.DataFrame().to_excel(writer,sheet_name='History') #initialize empty Sheet
Saved.append(1)
#Also save the model upon user-request
elif bool(self.fittingpopups_ui[listindex].checkBox_saveEpoch_pop.isChecked())==True:
if deviceSelected=="Multi-GPU":#in case of Multi-GPU...
#In case of multi-GPU, first copy the weights of the parallel model to the normal model
model_keras.set_weights(model_keras_p.layers[-2].get_weights())
model_keras.save(new_modelname.split(".model")[0]+"_"+str(counter)+".model")
Saved.append(1)
self.fittingpopups_ui[listindex].checkBox_saveEpoch_pop.setChecked(False)
else:
Saved.append(0)
elif collection==True:
for i in range(len(model_keras)):
#Expert-settings return automatically to default values when Expert-mode is unchecked
history = model_keras[i].fit(X_batch, Y_batch, batch_size=batchSize_expert, epochs=epochs_expert,verbose=verbose, validation_data=(X_valid, Y_valid),class_weight=class_weight,callbacks=callbacks)
HISTORIES[i].append(history.history)
learningrate = K.get_value(history.model.optimizer.lr)
print("model_keras_path[i]")
print(model_keras_path[i])
#Check if any metric broke a record
record_broken = False #initially, assume there is no new record
for key in history.history.keys():
value = history.history[key][-1]
record = model_metrics_records[key]
if 'val_acc' in key or 'val_precision' in key or 'val_recall' in key or 'val_f1_score' in key:
#These metrics should go up (towards 1)
if value>record:
model_metrics_records[key] = value
record_broken = True
text = key+" broke record -> Model will be saved"
print(text)
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
#one could 'break' here, but I want to update all records
elif 'val_loss' in key:
#This metric should go down (towards 0)
if value<record:
model_metrics_records[key] = value
record_broken = True
text = key+" broke record -> Model will be saved"
print(text)
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
#For collections of models:
if record_broken:
#Save the model
model_keras[i].save(model_keras_path[i].split(".model")[0]+"_"+str(counter)+".model")
SAVED[i].append(1)
elif bool(self.fittingpopups_ui[listindex].checkBox_saveEpoch_pop.isChecked())==True:
model_keras[i].save(model_keras_path[i].split(".model")[0]+"_"+str(counter)+".model")
SAVED[i].append(1)
self.fittingpopups_ui[listindex].checkBox_saveEpoch_pop.setChecked(False)
else:
SAVED[i].append(0)
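#Report back to the GUI thread via Qt signals: the progress bar gets the fraction of completed
#epochs, and the emitted history (including the current learning rate) feeds the real-time plots.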
callback_progressbar = float(counter)/nr_epochs
progress_callback.emit(100.0*callback_progressbar)
history_emit = history.history
history_emit["LearningRate"] = [learningrate]
history_callback.emit(history_emit)
Index.append(counter)
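#Periodically flush the collected history to the meta .xlsx file: the first epoch creates the
#'History' sheet with a header, later flushes append rows (roughly every spinBox_saveMetaEvery seconds).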
t2 = time.time()
if collection==False:
if counter==0:
#If this runs the first time, create the file with header
DF1 = [[ h[h_i][-1] for h_i in h] for h in Histories] #if nb_epoch in .fit() is >1, only save the last history item, because this corresponds to a model that could be saved
DF1 = np.r_[DF1]
DF1 = pd.DataFrame( DF1,columns=Histories[0].keys() )
DF1["Saved"] = Saved
DF1["Time"] = Stopwatch
DF1["LearningRate"] = LearningRate
DF1.index = Index
#If this runs the first time, create the file with header
if os.path.isfile(new_modelname.split(".model")[0]+'_meta.xlsx'):
os.chmod(new_modelname.split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH|S_IWRITE|S_IWGRP|S_IWOTH) #read/write
DF1.to_excel(writer,sheet_name='History')
writer.save()
os.chmod(new_modelname.split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH)
meta_saving_t = int(self.fittingpopups_ui[listindex].spinBox_saveMetaEvery.value())
text = "meta.xlsx was saved (automatic saving every "+str(meta_saving_t)+"s)"
print(text)
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
#self.fittingpopups_ui[listindex].backup.append({"DF1":DF1})
Index,Histories,Saved,Stopwatch,LearningRate = [],[],[],[],[]#reset the lists
#Get a sensible frequency for saving the dataframe (every 20s)
elif t2-t1>int(self.fittingpopups_ui[listindex].spinBox_saveMetaEvery.value()):
#elif counter%50==0: #otherwise save the history to excel after each n epochs
DF1 = [[ h[h_i][-1] for h_i in h] for h in Histories] #if nb_epoch in .fit() is >1, only save the last history item, because this corresponds to a model that could be saved
DF1 = np.r_[DF1]
DF1 = pd.DataFrame( DF1,columns=Histories[0].keys() )
DF1["Saved"] = Saved
DF1["Time"] = Stopwatch
DF1["LearningRate"] = LearningRate
DF1.index = Index
#Saving
if os.path.exists(os.path.dirname(new_modelname)):#check if folder is (still) available
if os.path.isfile(new_modelname.split(".model")[0]+'_meta.xlsx'):
os.chmod(new_modelname.split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH|S_IWRITE|S_IWGRP|S_IWOTH) #make read/write
DF1.to_excel(writer,sheet_name='History', startrow=writer.sheets['History'].max_row,header= False)
writer.save()
os.chmod(new_modelname.split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH) #make read only
meta_saving_t = int(self.fittingpopups_ui[listindex].spinBox_saveMetaEvery.value())
text = "meta.xlsx was saved (automatic saving every "+str(meta_saving_t)+"s to directory:\n)"+new_modelname
print(text)
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
Index,Histories,Saved,Stopwatch,LearningRate = [],[],[],[],[]#reset the lists
t1 = time.time()
else:#If folder not available, create a folder in temp
text = "Failed to save meta.xlsx. -> Create folder in temp\n"
saving_failed = True
temp_path = aid_bin.create_temp_folder()#create a temp folder if it does not already exist
text += "Your temp folder is here: "+str(temp_path)+"\n"
folder = os.path.split(new_modelname)[-2]
folder = os.path.split(folder)[-1]
fname = os.path.split(new_modelname)[-1]
#create that folder in temp if it doesn't exist already
if not os.path.exists(os.path.join(temp_path,folder)):
os.mkdir(os.path.join(temp_path,folder))
text +="Created directory in temp:\n"+os.path.join(temp_path,folder)
print(text)
#change the new_modelname to a path in temp
new_modelname = os.path.join(temp_path,folder,fname)
#inform user!
text = "Could not find original folder. Files are now saved to "+new_modelname
text = "<span style=\' color: red;\'>" +text+"</span>"#put red text to the infobox
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
text = "<span style=\' color: black;\'>" +""+"</span>"#reset textcolor to black
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
#update the excel writer
writer = pd.ExcelWriter(new_modelname.split(".model")[0]+'_meta.xlsx', engine='openpyxl')
self.fittingpopups_ui[listindex].writer = writer
pd.DataFrame().to_excel(writer,sheet_name='UsedData') #initialize empty Sheet
SelectedFiles_df.to_excel(writer,sheet_name='UsedData')
DataOverview_df.to_excel(writer,sheet_name='DataOverview') #write data overview to separate sheet
pd.DataFrame().to_excel(writer,sheet_name='Parameters') #initialize empty Sheet
pd.DataFrame().to_excel(writer,sheet_name='History') #initialize empty Sheet
if os.path.isfile(new_modelname.split(".model")[0]+'_meta.xlsx'):
print("There is already such a file...AID will add new data to it. Please check if this is OK")
os.chmod(new_modelname.split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH|S_IWRITE|S_IWGRP|S_IWOTH) #read/write
DF1.to_excel(writer,sheet_name='History')
writer.save()
os.chmod(new_modelname.split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH)
print("meta.xlsx was saved")
Index,Histories,Saved,Stopwatch,LearningRate = [],[],[],[],[]#reset the lists
if collection==True:
if counter==0:
for i in range(len(HISTORIES)):
Histories = HISTORIES[i]
Saved = SAVED[i]
#If this runs the first time, create the file with header
DF1 = [[ h[h_i][-1] for h_i in h] for h in Histories] #if nb_epoch in .fit() is >1, only save the last history item, because this corresponds to a model that could be saved
DF1 = np.r_[DF1]
DF1 = pd.DataFrame( DF1,columns=Histories[0].keys() )
DF1["Saved"] = Saved
DF1.index = Index
HISTORIES[i] = []#reset the Histories list
SAVED[i] = []
#If this runs the first time, create the file with header
if os.path.isfile(model_keras_path[i].split(".model")[0]+'_meta.xlsx'):
os.chmod(model_keras_path[i].split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH|S_IWRITE|S_IWGRP|S_IWOTH) #read/write
DF1.to_excel(Writers[i],sheet_name='History')
Writers[i].save()
os.chmod(model_keras_path[i].split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH)
print("meta.xlsx was saved")
Index = []#reset the Index list
#Get a sensible frequency for saving the dataframe (every 20s)
elif t2-t1>int(self.fittingpopups_ui[listindex].spinBox_saveMetaEvery.value()):
for i in range(len(HISTORIES)):
Histories = HISTORIES[i]
Saved = SAVED[i]
DF1 = [[ h[h_i][-1] for h_i in h] for h in Histories] #if nb_epoch in .fit() is >1, only save the last history item, because this corresponds to a model that could be saved
DF1 = np.r_[DF1]
DF1 = pd.DataFrame( DF1,columns=Histories[0].keys() )
DF1["Saved"] = Saved
DF1.index = Index
HISTORIES[i] = []#reset the Histories list
SAVED[i] = []
#Saving
#TODO: save to temp, if harddisk not available to prevent crash.
if os.path.isfile(model_keras_path[i].split(".model")[0]+'_meta.xlsx'):
os.chmod(model_keras_path[i].split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH|S_IWRITE|S_IWGRP|S_IWOTH) #make read/write
DF1.to_excel(Writers[i],sheet_name='History', startrow=Writers[i].sheets['History'].max_row,header= False)
Writers[i].save()
os.chmod(model_keras_path[i].split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH) #make read only
print("meta.xlsx was saved")
t1 = time.time()
Index = []#reset the Index list
counter+=1
progress_callback.emit(100.0)
#If the original storage location became inaccessible (folder name changed, HD unplugged...),
#the models and meta files were saved to the temp folder. Inform the user!!!
if saving_failed==True:
path_orig = str(self.fittingpopups_ui[listindex].lineEdit_modelname_pop.text())
text = "<html><head/><body><p>Original path:<br>"+path_orig+\
"<br>became inaccessible during training! Files were then saved to:<br>"+\
new_modelname.split(".model")[0]+"<br>To bring both parts back together\
, you have manually open the meta files (excel) and copy;paste each sheet. \
Sorry for the inconvenience.<br>If that happens often, you may contact \
the main developer and ask him to improve that.</p></body></html>"
text = "<span style=\' font-weight:600; color: red;\'>" +text+"</span>"#put red text to the infobox
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
print('\a')#make a noise
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.setStyleSheet("background-color: yellow;")
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.moveCursor(QtGui.QTextCursor.End)
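#Training loop finished (or was stopped): write any history entries that have not been flushed yet
#to the meta file(s) and close the Excel writer(s).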
if collection==False:
if len(Histories)>0: #if the list for History files is not empty, process it!
DF1 = [[ h[h_i][-1] for h_i in h] for h in Histories] #if nb_epoch in .fit() is >1, only save the last history item, because this corresponds to a model that could be saved
DF1 = np.r_[DF1]
DF1 = pd.DataFrame( DF1,columns=Histories[0].keys() )
DF1["Saved"] = Saved
DF1["Time"] = Stopwatch
DF1["LearningRate"] = LearningRate
DF1.index = Index
Index = []#reset the Index list
Histories = []#reset the Histories list
Saved = []
#does such a file exist already? append!
if not os.path.isfile(new_modelname.split(".model")[0]+'_meta.xlsx'):
DF1.to_excel(writer,sheet_name='History')
else: # else it exists so append without writing the header
DF1.to_excel(writer,sheet_name='History', startrow=writer.sheets['History'].max_row,header= False)
if os.path.isfile(new_modelname.split(".model")[0]+'_meta.xlsx'):
os.chmod(new_modelname.split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH|S_IWRITE|S_IWGRP|S_IWOTH) #make read/write
writer.save()
writer.close()
if collection==True:
for i in range(len(HISTORIES)):
Histories = HISTORIES[i]
Saved = SAVED[i]
if len(Histories)>0: #if the list for History files is not empty, process it!
DF1 = [[ h[h_i][-1] for h_i in h] for h in Histories] #if nb_epoch in .fit() is >1, only save the last history item, because this corresponds to a model that could be saved
DF1 = np.r_[DF1]
DF1 = pd.DataFrame( DF1,columns=Histories[0].keys() )
DF1["Saved"] = Saved
DF1.index = Index
HISTORIES[i] = []#reset the Histories list
SAVED[i] = []
#does such a file exist already? append!
if not os.path.isfile(model_keras_path[i].split(".model")[0]+'_meta.xlsx'):
DF1.to_excel(Writers[i],sheet_name='History')
else: # else it exists so append without writing the header
DF1.to_excel(Writers[i],sheet_name='History', startrow=Writers[i].sheets['History'].max_row,header=False) #append to the meta file of model i
if os.path.isfile(model_keras_path[i].split(".model")[0]+'_meta.xlsx'):
os.chmod(model_keras_path[i].split(".model")[0]+'_meta.xlsx', S_IREAD|S_IRGRP|S_IROTH|S_IWRITE|S_IWGRP|S_IWOTH) #make read/write
Writers[i].save()
Writers[i].close()
Index = []#reset the Index list
sess.close()
# try:
# aid_dl.reset_keras(model_keras)
# except:
# pass
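#action_fit_model: entry point for the "Fit" button. It takes the initialized model, saves it to
#disk (models cannot be passed between threads in this TensorFlow setup), optionally offers to move
#the data to RAM, opens a fitting popup and starts action_fit_model_worker on the thread pool.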
def action_fit_model(self):
#Take the initialized model
#Unfortunately, in TensorFlow it is not possible to pass a model from
#one thread to another. Therefore I have to load and save the models each time :(
model_keras = self.model_keras
if type(model_keras)==tuple:
collection=True
else:
collection=False
#Check if there was a model initialized:
new_modelname = str(self.lineEdit_modelname.text())
if len(new_modelname)==0:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Please define a path/filename for the model to be fitted!")
msg.setWindowTitle("Model path/ filename missing!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
if model_keras==None:#in case the model got deleted in another task
self.action_initialize_model(duties="initialize_train")
print("Had to re-run action_initialize_model!")
model_keras = self.model_keras
self.model_keras = None#delete this copy
if model_keras==None:
# msg = QtWidgets.QMessageBox()
# msg.setIcon(QtWidgets.QMessageBox.Information)
# msg.setText("Model could not be initialized")
# msg.setWindowTitle("Error")
# msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
# msg.exec_()
return
if not model_keras==None:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Model is now initialized for you, Please check Model summary window below if everything is correct and then press Fit again!")
msg.setWindowTitle("No initilized model found!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#There should be at least two outputs (index 0 and 1)
if collection==False:
#model_config = model_keras.get_config()#["layers"]
nr_classes = int(model_keras.output.shape.dims[1])
if collection==True:
#model_config = model_keras[1][0].get_config()#["layers"]
nr_classes = int(model_keras[1][0].output.shape.dims[1])
if nr_classes<2:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Please define at least two classes")
msg.setWindowTitle("Not enough classes")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
if collection==False:
#define a variable on self which allows the fit_model_worker to load this model and fit
#(sorry, this is necessary since TensorFlow does not support passing models between threads)
self.model_keras_path = new_modelname.split(".model")[0]+"_0.model"
#save a first version of the .model
model_keras.save(self.model_keras_path)
#Delete the variable to save RAM
model_keras = None #Since this uses TensorFlow, the model has to be reloaded in action_fit_model_worker anyway
if collection==True:
#define a variable on self which allows the fit_model_worker to load this model and fit
#(sorry, this is necessary since TensorFlow does not support passing models between threads)
self.model_keras_path = [new_modelname.split(".model")[0]+"_"+model_keras[0][i]+".model" for i in range(len(model_keras[0]))]
for i in range(len(self.model_keras_path)):
#save a first version of the .model
model_keras[1][i].save(self.model_keras_path[i])
#Delete the variable to save RAM
model_keras = None #Since this uses TensorFlow, the model has to be reloaded in action_fit_model_worker anyway
#Check that Data is on RAM
DATA_len = len(self.ram) #this returns the len of a dictionary. The dictionary is supposed to contain the training/validation data; otherwise the data is read from .rtdc data directly (SLOW unless you have ultra-good SSD)
def popup_data_to_ram(button):
yes_or_no = button.text()
if yes_or_no == "&Yes":
print("Moving data to ram")
self.actionDataToRamNow_function()
elif yes_or_no == "&No":
pass
if DATA_len==0:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Would you like transfer the Data to RAM now?\n(Currently the data is not in RAM and would be read from .rtdc, which slows down fitting dramatically unless you have a super-fast SSD.)")
msg.setWindowTitle("Data to RAM now?")
msg.setStandardButtons(QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)
msg.buttonClicked.connect(popup_data_to_ram)
msg.exec_()
###################Popup Window####################################
self.fittingpopups.append(MyPopup())
ui = aid_frontend.Fitting_Ui()
ui.setupUi(self.fittingpopups[-1]) #append the ui to the last element on the list
self.fittingpopups_ui.append(ui)
# Increase the popupcounter by one; this will help to coordinate the data flow between main ui and popup
self.popupcounter += 1
listindex=self.popupcounter-1
##############################Define functions#########################
self.fittingpopups_ui[listindex].pushButton_UpdatePlot_pop.clicked.connect(lambda: self.update_historyplot_pop(listindex))
self.fittingpopups_ui[listindex].pushButton_Stop_pop.clicked.connect(lambda: self.stop_fitting_pop(listindex))
self.fittingpopups_ui[listindex].pushButton_Pause_pop.clicked.connect(lambda: self.pause_fitting_pop(listindex))
self.fittingpopups_ui[listindex].pushButton_saveTextWindow_pop.clicked.connect(lambda: self.saveTextWindow_pop(listindex))
self.fittingpopups_ui[listindex].pushButton_clearTextWindow_pop.clicked.connect(lambda: self.clearTextWindow_pop(listindex))
self.fittingpopups_ui[listindex].pushButton_showModelSumm_pop.clicked.connect(lambda: self.showModelSumm_pop(listindex))
self.fittingpopups_ui[listindex].pushButton_saveModelSumm_pop.clicked.connect(lambda: self.saveModelSumm_pop(listindex))
#Expert mode functions
#self.fittingpopups_ui[listindex].checkBox_pTr_pop.toggled.connect(lambda on_or_off: self.partialtrainability_activated_pop(on_or_off,listindex))
self.fittingpopups_ui[listindex].pushButton_lossW.clicked.connect(lambda: self.lossWeights_popup(listindex))
self.fittingpopups_ui[listindex].checkBox_lossW.clicked.connect(lambda on_or_off: self.lossWeights_activated(on_or_off,listindex))
self.fittingpopups_ui[listindex].Form.setWindowTitle(os.path.split(new_modelname)[1])
self.fittingpopups_ui[listindex].progressBar_Fitting_pop.setValue(0) #set the progress bar to zero
self.fittingpopups_ui[listindex].pushButton_ShowExamleImgs_pop.clicked.connect(lambda: self.action_show_example_imgs_pop(listindex))
self.fittingpopups_ui[listindex].tableWidget_HistoryInfo_pop.doubleClicked.connect(lambda item: self.tableWidget_HistoryInfo_pop_dclick(item,listindex))
#Cyclical learning rate extra settings
self.fittingpopups_ui[listindex].pushButton_cycLrPopup.clicked.connect(lambda: self.popup_clr_settings(listindex))
self.fittingpopups_ui[listindex].comboBox_optimizer.currentTextChanged.connect(lambda: self.expert_optimizer_changed(optimizer_text=self.fittingpopups_ui[listindex].comboBox_optimizer.currentText(),listindex=listindex))
self.fittingpopups_ui[listindex].pushButton_LR_plot.clicked.connect(lambda: self.popup_lr_plot(listindex))
self.fittingpopups_ui[listindex].doubleSpinBox_learningRate.valueChanged.connect(lambda: self.expert_lr_changed(value=self.fittingpopups_ui[listindex].doubleSpinBox_learningRate.value(),optimizer_text=self.fittingpopups_ui[listindex].comboBox_optimizer.currentText(),listindex=listindex))
self.fittingpopups_ui[listindex].doubleSpinBox_expDecInitLr.valueChanged.connect(lambda: self.expert_lr_changed(value=self.fittingpopups_ui[listindex].doubleSpinBox_learningRate.value(),optimizer_text=self.fittingpopups_ui[listindex].comboBox_optimizer.currentText(),listindex=listindex))
self.fittingpopups_ui[listindex].pushButton_optimizer_pop.clicked.connect(lambda: self.optimizer_change_settings_popup(listindex))
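#The actual fitting runs in a background thread (action_fit_model_worker) so the GUI stays responsive;
#signals connect the worker to the progress bar and to real_time_info defined below.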
worker = Worker(self.action_fit_model_worker)
#Get a signal from the worker to update the progressbar
worker.signals.progress.connect(self.fittingpopups_ui[listindex].progressBar_Fitting_pop.setValue)
#Define a func which prints information during fitting to textbrowser
#And furthermore provide option to do real-time plotting
def real_time_info(dic):
self.fittingpopups_ui[listindex].Histories.append(dic) #append to a list. Will be used for plotting in the "Update plot" function
OtherMetrics_keys = self.fittingpopups_ui[listindex].RealTime_OtherMetrics.keys()
#Append to lists for real-time plotting
self.fittingpopups_ui[listindex].RealTime_Acc.append(dic["acc"][0])
self.fittingpopups_ui[listindex].RealTime_ValAcc.append(dic["val_acc"][0])
self.fittingpopups_ui[listindex].RealTime_Loss.append(dic["loss"][0])
self.fittingpopups_ui[listindex].RealTime_ValLoss.append(dic["val_loss"][0])
keys = list(dic.keys())
#sort keys alphabetically
keys_ = [l.lower() for l in keys]
ind_sort = np.argsort(keys_)
keys = list(np.array(keys)[ind_sort])
#First keys should always be acc,loss,val_acc,val_loss -in this order
keys_first = ["acc","loss","val_acc","val_loss"]
for i in range(len(keys_first)):
if keys_first[i] in keys:
ind = np.where(np.array(keys)==keys_first[i])[0][0]
if ind!=i:
del keys[ind]
keys.insert(i,keys_first[i])
for key in keys:
if "precision" in key or "f1" in key or "recall" in key or "LearningRate" in key:
if not key in OtherMetrics_keys: #if this key is missing in self.fittingpopups_ui[listindex].RealTime_OtherMetrics attach it!
self.fittingpopups_ui[listindex].RealTime_OtherMetrics[key] = []
self.fittingpopups_ui[listindex].RealTime_OtherMetrics[key].append(dic[key])
dic_text = [("{} {}".format(item, np.round(amount[0],4))) for item, amount in dic.items()]
text = "Epoch "+str(self.fittingpopups_ui[listindex].epoch_counter)+"\n"+" ".join(dic_text)
self.fittingpopups_ui[listindex].textBrowser_FittingInfo.append(text)
self.fittingpopups_ui[listindex].epoch_counter+=1
if self.fittingpopups_ui[listindex].epoch_counter==1:
#for each key, put a checkbox on the tableWidget_HistoryInfo_pop
rowPosition = self.fittingpopups_ui[listindex].tableWidget_HistoryInfo_pop.rowCount()
self.fittingpopups_ui[listindex].tableWidget_HistoryInfo_pop.insertRow(rowPosition)
self.fittingpopups_ui[listindex].tableWidget_HistoryInfo_pop.setColumnCount(len(keys))
for columnPosition in range(len(keys)):#(2,4):
key = keys[columnPosition]
#for each item, also create 2 checkboxes (train/valid)
item = QtWidgets.QTableWidgetItem(str(key))#("item {0} {1}".format(rowNumber, columnNumber))
item.setBackground(QtGui.QColor(self.colorsQt[columnPosition]))
item.setFlags( QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsEnabled )
item.setCheckState(QtCore.Qt.Unchecked)
self.fittingpopups_ui[listindex].tableWidget_HistoryInfo_pop.setItem(rowPosition, columnPosition, item)
self.fittingpopups_ui[listindex].tableWidget_HistoryInfo_pop.resizeColumnsToContents()
self.fittingpopups_ui[listindex].tableWidget_HistoryInfo_pop.resizeRowsToContents()
########################Real-time plotting#########################
if self.fittingpopups_ui[listindex].checkBox_realTimePlotting_pop.isChecked():
#get the range for the real time fitting
if hasattr(self.fittingpopups_ui[listindex], 'historyscatters'):#if update plot was hit before
x = range(len(self.fittingpopups_ui[listindex].Histories))
realTimeEpochs = self.fittingpopups_ui[listindex].spinBox_realTimeEpochs.value()
if len(x)>realTimeEpochs:
x = x[-realTimeEpochs:]
#is any metric checked on the table?
colcount = int(self.fittingpopups_ui[listindex].tableWidget_HistoryInfo_pop.columnCount())
#Collect items that are checked
selected_items,Colors = [],[]
for colposition in range(colcount):
#is it checked?
cb = self.fittingpopups_ui[listindex].tableWidget_HistoryInfo_pop.item(0, colposition)
if not cb==None:
if cb.checkState() == QtCore.Qt.Checked:
selected_items.append(str(cb.text()))
Colors.append(cb.background())
for i in range(len(self.fittingpopups_ui[listindex].historyscatters)): #iterate over all available plots
key = list(self.fittingpopups_ui[listindex].historyscatters.keys())[i]
if key in selected_items:
if key=="acc":
y = np.array(self.fittingpopups_ui[listindex].RealTime_Acc).astype(float)
elif key=="val_acc":
y = np.array(self.fittingpopups_ui[listindex].RealTime_ValAcc).astype(float)
elif key=="loss":
y = np.array(self.fittingpopups_ui[listindex].RealTime_Loss).astype(float)
elif key=="val_loss":
y = np.array(self.fittingpopups_ui[listindex].RealTime_ValLoss).astype(float)
elif "precision" in key or "f1" in key or "recall" in key or "LearningRate" in key:
y = np.array(self.fittingpopups_ui[listindex].RealTime_OtherMetrics[key]).astype(float).reshape(-1,)
else:
return
#Only show the last realTimeEpochs epochs
if y.shape[0]>realTimeEpochs:
y = y[-realTimeEpochs:]
if y.shape[0]==len(x):
self.fittingpopups_ui[listindex].historyscatters[key].setData(x, y)#,pen=None,symbol='o',symbolPen=None,symbolBrush=brush,clear=False)
else:
print("x and y are not the same size! Omitted plotting. I will try again to plot after the next epoch.")
pg.QtGui.QApplication.processEvents()
self.fittingpopups_ui[listindex].epoch_counter = 0
#self.fittingpopups_ui[listindex].backup = [] #backup of the meta information -> in case the original folder is not accessible anymore
worker.signals.history.connect(real_time_info)
#Finally start the worker!
self.threadpool.start(worker)
self.fittingpopups[listindex].show()
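#action_lr_finder: learning-rate screening. The setup mirrors action_fit_model (the model is saved to
#disk and reloaded in the worker), but it starts action_lr_finder_worker and only supports single models.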
def action_lr_finder(self):
#lr_find
model_keras = self.model_keras
if type(model_keras)==tuple:
collection=True
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Warning)
msg.setText("LR screening is not supported for Collections of models. Please select single model")
msg.setWindowTitle("LR screening not supported for Collections!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
else:
collection=False
#Check if there was a model initialized:
new_modelname = str(self.lineEdit_modelname.text())
if len(new_modelname)==0:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Please define a path/filename for the model to be fitted!")
msg.setWindowTitle("Model path/ filename missing!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
if model_keras==None:#in case the model got deleted in another task
self.action_initialize_model(duties="initialize_train")
print("Had to re-run action_initialize_model!")
model_keras = self.model_keras
self.model_keras = None#delete this copy
if model_keras==None:
return
if not model_keras==None:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Model is now initialized for you, Please check Model summary window below if everything is correct and then press Fit again!")
msg.setWindowTitle("No initilized model found!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
nr_classes = int(model_keras.output.shape.dims[1])
if nr_classes<2:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Please define at least two classes")
msg.setWindowTitle("Not enough classes")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#define a variable on self which allows action_lr_finder_worker to load this model
#(sorry, this is necessary since TensorFlow does not support passing models between threads)
self.model_keras_path = new_modelname.split(".model")[0]+"_0.model"
#save a first version of the .model
model_keras.save(self.model_keras_path)
#Delete the variable to save RAM
model_keras = None #Since this uses TensorFlow, the model has to be reloaded in action_lr_finder_worker anyway
#Check that Data is on RAM
DATA_len = len(self.ram) #this returns the len of a dictionary. The dictionary is supposed to contain the training/validation data; otherwise the data is read from .rtdc data directly (SLOW unless you have ultra-good SSD)
def popup_data_to_ram(button):
yes_or_no = button.text()
if yes_or_no == "&Yes":
print("Moving data to ram")
self.actionDataToRamNow_function()
elif yes_or_no == "&No":
pass
if DATA_len==0:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Would you like transfer the Data to RAM now?\n(Currently the data is not in RAM and would be read from .rtdc, which slows down fitting dramatically unless you have a super-fast SSD.)")
msg.setWindowTitle("Data to RAM now?")
msg.setStandardButtons(QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)
msg.buttonClicked.connect(popup_data_to_ram)
msg.exec_()
worker = Worker(self.action_lr_finder_worker)
#Get a signal from the worker to update the progressbar
worker.signals.progress.connect(print)
worker.signals.history.connect(print)
#Finally start the worker!
self.threadpool.start(worker)
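#Worker for the learning-rate screening: it creates the TF session/device config, reloads the model,
#applies the expert settings, loads and augments a subset of the training/validation data
#(percDataT/percDataV) and normalizes it, in close analogy to action_fit_model_worker.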
def action_lr_finder_worker(self,progress_callback,history_callback):
if self.radioButton_cpu.isChecked():
gpu_used = False
deviceSelected = str(self.comboBox_cpu.currentText())
elif self.radioButton_gpu.isChecked():
gpu_used = True
deviceSelected = str(self.comboBox_gpu.currentText())
gpu_memory = float(self.doubleSpinBox_memory.value())
#Retrieve more Multi-GPU Options from Menubar:
cpu_merge = bool(self.actioncpu_merge.isEnabled())
cpu_relocation = bool(self.actioncpu_relocation.isEnabled())
cpu_weight_merge = bool(self.actioncpu_weightmerge.isEnabled())
#Create config (define which device to use)
config_gpu = aid_dl.get_config(cpu_nr,gpu_nr,deviceSelected,gpu_memory)
with tf.Session(graph = tf.Graph(), config=config_gpu) as sess:
#get an index of the fitting popup
#listindex = self.popupcounter-1
#Get user-specified filename for the new model
model_keras_path = self.model_keras_path
if type(model_keras_path)==list:
collection = True
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Warning)
msg.setText("LR screening is currently not supported for Collections of models. Please use single model")
msg.setWindowTitle("LR screening not supported for Collections")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
else:
collection = False
if deviceSelected=="Multi-GPU" and cpu_weight_merge==True:
with tf.device("/cpu:0"):
model_keras = load_model(model_keras_path,custom_objects=aid_dl.get_custom_metrics())
else:
model_keras = load_model(model_keras_path,custom_objects=aid_dl.get_custom_metrics())
#Initialize a variable for the parallel model
model_keras_p = None
#Multi-GPU
if deviceSelected=="Multi-GPU":
if collection==False:
print("Adjusting the model for Multi-GPU")
model_keras_p = multi_gpu_model(model_keras, gpus=gpu_nr, cpu_merge=cpu_merge, cpu_relocation=cpu_relocation)#indicate the numbers of gpus that you have
if self.radioButton_LoadContinueModel.isChecked():#calling multi_gpu_model resets the weights. Hence, they need to be put in place again
model_keras_p.layers[-2].set_weights(model_keras.get_weights())
elif collection==True:
print("Collection & Multi-GPU is not supported yet")
return
##############Main function after hitting FIT MODEL####################
if self.radioButton_LoadRestartModel.isChecked():
load_modelname = str(self.lineEdit_LoadModelPath.text())
elif self.radioButton_LoadContinueModel.isChecked():
load_modelname = str(self.lineEdit_LoadModelPath.text())
elif self.radioButton_NewModel.isChecked():
load_modelname = "" #No model is loaded
if collection==False:
#model_config = model_keras.get_config()#["layers"]
nr_classes = int(model_keras.output.shape.dims[1])
if collection==True:
#model_config = model_keras.get_config()#["layers"]
nr_classes = int(model_keras[0].output.shape.dims[1])
#Metrics to be displayed during fitting (real-time)
model_metrics = self.get_metrics(nr_classes)
#Compile model
if deviceSelected=="Single-GPU":
model_keras.compile(loss='categorical_crossentropy',optimizer='adam',metrics=model_metrics)#compile with default loss/optimizer for now; expert settings follow and the model will be recompiled if needed
elif deviceSelected=="Multi-GPU":
model_keras_p.compile(loss='categorical_crossentropy',optimizer='adam',metrics=model_metrics)#compile with default loss/optimizer for now; expert settings follow and the model will be recompiled if needed
#Collect all information about the fitting routine that was user
#defined
if self.actionVerbose.isChecked()==True:
verbose = 1
else:
verbose = 0
trainable_original, layer_names = self.trainable_original, self.layer_names
crop = int(self.spinBox_imagecrop.value())
norm = str(self.comboBox_Normalization.currentText())
nr_epochs = int(self.spinBox_NrEpochs.value())
h_flip = bool(self.checkBox_HorizFlip.isChecked())
v_flip = bool(self.checkBox_VertFlip.isChecked())
rotation = float(self.lineEdit_Rotation.text())
width_shift = float(self.lineEdit_widthShift.text())
height_shift = float(self.lineEdit_heightShift.text())
zoom = float(self.lineEdit_zoomRange.text())
shear = float(self.lineEdit_shearRange.text())
brightness_add_lower = float(self.spinBox_PlusLower.value())
brightness_add_upper = float(self.spinBox_PlusUpper.value())
brightness_mult_lower = float(self.doubleSpinBox_MultLower.value())
brightness_mult_upper = float(self.doubleSpinBox_MultUpper.value())
gaussnoise_mean = float(self.doubleSpinBox_GaussianNoiseMean.value())
gaussnoise_scale = float(self.doubleSpinBox_GaussianNoiseScale.value())
contrast_on = bool(self.checkBox_contrast.isChecked())
contrast_lower = float(self.doubleSpinBox_contrastLower.value())
contrast_higher = float(self.doubleSpinBox_contrastHigher.value())
saturation_on = bool(self.checkBox_saturation.isChecked())
saturation_lower = float(self.doubleSpinBox_saturationLower.value())
saturation_higher = float(self.doubleSpinBox_saturationHigher.value())
hue_on = bool(self.checkBox_hue.isChecked())
hue_delta = float(self.doubleSpinBox_hueDelta.value())
avgBlur_on = bool(self.checkBox_avgBlur.isChecked())
avgBlur_min = int(self.spinBox_avgBlurMin.value())
avgBlur_max = int(self.spinBox_avgBlurMax.value())
gaussBlur_on = bool(self.checkBox_gaussBlur.isChecked())
gaussBlur_min = int(self.spinBox_gaussBlurMin.value())
gaussBlur_max = int(self.spinBox_gaussBlurMax.value())
motionBlur_on = bool(self.checkBox_motionBlur.isChecked())
motionBlur_kernel = str(self.lineEdit_motionBlurKernel.text())
motionBlur_angle = str(self.lineEdit_motionBlurAngle.text())
motionBlur_kernel = tuple(ast.literal_eval(motionBlur_kernel)) #translate string in the lineEdits to a tuple
motionBlur_angle = tuple(ast.literal_eval(motionBlur_angle)) #translate string in the lineEdits to a tuple
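#Illustrative example: a lineEdit text such as "3,9" is parsed by ast.literal_eval into the tuple (3, 9)
#(presumably a lower/upper bound used by aid_img.motion_blur_cv); the angle lineEdit is handled the same way.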
if collection==False:
expert_mode = bool(self.groupBox_expertMode.isChecked())
elif collection==True:
self.groupBox_expertMode.setChecked(False)#uncheck expert mode; expert_mode is set to False below
print("Expert mode was switched off. Not implemented yet for collections")
expert_mode = False
learning_rate_const = float(self.doubleSpinBox_learningRate.value())
loss_expert = str(self.comboBox_expt_loss.currentText()).lower()
optimizer_expert = str(self.comboBox_optimizer.currentText()).lower()
optimizer_settings = self.optimizer_settings.copy()
paddingMode = str(self.comboBox_paddingMode.currentText())#.lower()
train_last_layers = bool(self.checkBox_trainLastNOnly.isChecked())
train_last_layers_n = int(self.spinBox_trainLastNOnly.value())
train_dense_layers = bool(self.checkBox_trainDenseOnly.isChecked())
dropout_expert_on = bool(self.checkBox_dropout.isChecked())
try:
dropout_expert = str(self.lineEdit_dropout.text()) #due to the validator, there are no square brackets
dropout_expert = "["+dropout_expert+"]"
dropout_expert = ast.literal_eval(dropout_expert)
except:
dropout_expert = []
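#Illustrative example: a lineEdit text of "0.2,0.3" becomes "[0.2,0.3]" and ast.literal_eval turns it
#into the list [0.2, 0.3]; a single value such as "0.25" yields [0.25].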
lossW_expert = str(self.lineEdit_lossW.text())
#To get the class weights (loss), the SelectedFiles are required
SelectedFiles = self.items_clicked_no_rtdc_ds()
#Check if xtra_data should be used for training
xtra_in = [s["xtra_in"] for s in SelectedFiles]
if len(set(xtra_in))==1:
xtra_in = list(set(xtra_in))[0]
elif len(set(xtra_in))>1:# both False and True are present. Not supported
print("Xtra data is used only for some files. Xtra data needs to be used either by all or by none!")
return
#Get the class weights. This function runs now the first time in the fitting routine.
#It is possible that the user chose Custom weights and then changed the classes. Hence first check if
#there is a weight for each class available.
class_weight = self.get_class_weight(SelectedFiles,lossW_expert,custom_check_classes=True)
if type(class_weight)==list:
#There has been a mismatch between the classes described in class_weight and the classes available in SelectedFiles!
lossW_expert = class_weight[0] #overwrite
class_weight = class_weight[1]
print(class_weight)
print("There has been a mismatch between the classes described in \
Loss weights and the classes available in the selected files! \
Hence, the Loss weights are set to Balanced")
###############################Expert Mode values##################
if expert_mode==True:
#Some settings only need to be changed once, after user clicked apply at next epoch
#Apply the changes to trainable states:
if train_last_layers==True:#Train only the last n layers
print("Train only the last "+str(train_last_layers_n)+ " layer(s)")
trainable_new = (len(trainable_original)-train_last_layers_n)*[False]+train_last_layers_n*[True]
summary = aid_dl.model_change_trainability(model_keras,trainable_new,model_metrics,nr_classes,loss_expert,optimizer_settings,learning_rate_const)
if model_keras_p!=None:#if this is NOT None, there exists a parallel model, which also needs to be re-compiled
aid_dl.model_compile(model_keras_p,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
print("Recompiled parallel model for train_last_layers==True")
text1 = "Expert mode: Request for custom trainability states: train only the last "+str(train_last_layers_n)+ " layer(s)\n"
#text2 = "\n--------------------\n"
print(text1+summary)
if train_dense_layers==True:#Train only dense layers
print("Train only dense layers")
layer_dense_ind = ["Dense" in x for x in layer_names]
layer_dense_ind = np.where(np.array(layer_dense_ind)==True)[0] #at which indices are dense layers?
#create a list of trainable states
trainable_new = len(trainable_original)*[False]
for index in layer_dense_ind:
trainable_new[index] = True
summary = aid_dl.model_change_trainability(model_keras,trainable_new,model_metrics,nr_classes,loss_expert,optimizer_settings,learning_rate_const)
if model_keras_p!=None:#if this is NOT None, there exists a parallel model, which also needs to be re-compiled
aid_dl.model_compile(model_keras_p,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
print("Recompiled parallel model for train_dense_layers==True")
text1 = "Expert mode: Request for custom trainability states: train only dense layer(s)\n"
#text2 = "\n--------------------\n"
print(text1+summary)
if dropout_expert_on==True:
#The user apparently wants to change the dropout rates
do_list = aid_dl.get_dropout(model_keras)#Get a list of dropout values of the current model
#Compare the dropout values in the model to the dropout values requested by user
if len(dropout_expert)==1:#if the user gave a single float
dropout_expert_list = len(do_list)*dropout_expert #convert to list
elif len(dropout_expert)>1:
dropout_expert_list = dropout_expert
if not len(dropout_expert_list)==len(do_list):
text = "Issue with dropout: you defined "+str(len(dropout_expert_list))+" dropout rates, but model has "+str(len(do_list))+" dropout layers"
print(text)
else:
text = "Could not understand user input at Expert->Dropout"
print(text)
dropout_expert_list = []
if len(dropout_expert_list)>0 and do_list!=dropout_expert_list:#if the dropout rates of the current model are not equal to the rates requested by the user...
do_changed = aid_dl.change_dropout(model_keras,dropout_expert_list,model_metrics,nr_classes,loss_expert,optimizer_settings,learning_rate_const)
if model_keras_p!=None:#if this is NOT None, there exists a parallel model, which also needs to be re-compiled
aid_dl.model_compile(model_keras_p,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
print("Recompiled parallel model to change dropout. I'm not sure if this works already!")
if do_changed==1:
text_do = "Dropout rate(s) in model was/were changed to: "+str(dropout_expert_list)
else:
text_do = "Dropout rate(s) in model was/were not changed"
else:
text_do = "Dropout rate(s) in model was/were not changed"
print(text_do)
text_updates = ""
#Compare current lr and the lr on expert tab:
if collection == False:
lr_current = K.eval(model_keras.optimizer.lr)
else:
lr_current = K.eval(model_keras[0].optimizer.lr)
lr_diff = learning_rate_const-lr_current
if abs(lr_diff) > 1e-6:
if collection == False:
K.set_value(model_keras.optimizer.lr, learning_rate_const)
if collection == True:
for m in model_keras:
K.set_value(m.optimizer.lr, learning_rate_const)
text_updates += "Changed the learning rate to "+ str(learning_rate_const)+"\n"
recompile = False
#Compare current optimizer and the optimizer on expert tab:
if collection==False:
optimizer_current = aid_dl.get_optimizer_name(model_keras).lower()#get the current optimizer of the model
if collection==True:
optimizer_current = aid_dl.get_optimizer_name(model_keras[0]).lower()#get the current optimizer of the model
if optimizer_current!=optimizer_expert.lower():#if the current model has a different optimizer
recompile = True
text_updates+="Changed the optimizer to "+optimizer_expert+"\n"
#Compare current loss function and the loss-function on expert tab:
if collection==False:
if model_keras.loss!=loss_expert:
recompile = True
text_updates+="Changed the loss function to "+loss_expert+"\n"
if collection==True:
if model_keras[0].loss!=loss_expert:
recompile = True
text_updates+="Changed the loss function to "+loss_expert+"\n"
if recompile==True:
print("Recompiling...")
if collection==False:
aid_dl.model_compile(model_keras,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
if collection==True:
for m in model_keras[1]:
aid_dl.model_compile(m, loss_expert, optimizer_settings, learning_rate_const,model_metrics, nr_classes)
if model_keras_p!=None:#if this is NOT None, there exists a parallel model, which also needs to be re-compiled
aid_dl.model_compile(model_keras_p,loss_expert,optimizer_settings,learning_rate_const,model_metrics,nr_classes)
print("Recompiled parallel model to adjust learning rate, loss, optimizer")
print(text_updates)
######################Load the Training Data################################
ind = [selectedfile["TrainOrValid"] == "Train" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
SelectedFiles_train = np.array(SelectedFiles)[ind]
SelectedFiles_train = list(SelectedFiles_train)
indices_train = [selectedfile["class"] for selectedfile in SelectedFiles_train]
nr_events_epoch_train = [selectedfile["nr_events_epoch"] for selectedfile in SelectedFiles_train]
rtdc_path_train = [selectedfile["rtdc_path"] for selectedfile in SelectedFiles_train]
zoom_factors_train = [selectedfile["zoom_factor"] for selectedfile in SelectedFiles_train]
#zoom_order = [self.actionOrder0.isChecked(),self.actionOrder1.isChecked(),self.actionOrder2.isChecked(),self.actionOrder3.isChecked(),self.actionOrder4.isChecked(),self.actionOrder5.isChecked()]
#zoom_order = int(np.where(np.array(zoom_order)==True)[0])
zoom_order = int(self.comboBox_zoomOrder.currentIndex()) #the combobox-index is already the zoom order
shuffle_train = [selectedfile["shuffle"] for selectedfile in SelectedFiles_train]
xtra_in = set([selectedfile["xtra_in"] for selectedfile in SelectedFiles_train])
if len(xtra_in)>1:# both False and True are present. Not supported
print("Xtra data is used only for some files. Xtra data needs to be used either by all or by none!")
return
xtra_in = list(xtra_in)[0]#this is either True or False
if verbose==1:
print("Length of DATA (in RAM) = "+str(len(self.ram)))
#If the scaling method is "divide by mean and std of the whole training set":
if norm == "StdScaling using mean and std of all training data":
mean_trainingdata,std_trainingdata = [],[]
for i in range(len(SelectedFiles_train)):
if len(self.ram)==0: #Here, the entire training set needs to be used! Not only random images!
#Replace=true: means individual cells could occur several times
gen_train = aid_img.gen_crop_img(crop,rtdc_path_train[i],random_images=False,zoom_factor=zoom_factors_train[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode)
else:
gen_train = aid_img.gen_crop_img_ram(self.ram,rtdc_path_train[i],random_images=False) #Replace true means that individual cells could occur several times
if self.actionVerbose.isChecked():
print("Loaded data from RAM")
images = next(gen_train)[0]
mean_trainingdata.append(np.mean(images))
std_trainingdata.append(np.std(images))
mean_trainingdata = np.mean(np.array(mean_trainingdata))
std_trainingdata = np.mean(np.array(std_trainingdata))
if np.allclose(std_trainingdata,0):
std_trainingdata = 0.0001
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
text = "<html><head/><body><p>The standard deviation of your training data is zero! This would lead to division by zero. To avoid this, I will divide by 0.0001 instead.</p></body></html>"
msg.setText(text)
msg.setWindowTitle("Std. is zero")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
######################Load the Validation Data################################
ind = [selectedfile["TrainOrValid"] == "Valid" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
SelectedFiles_valid = np.array(SelectedFiles)[ind]
SelectedFiles_valid = list(SelectedFiles_valid)
indices_valid = [selectedfile["class"] for selectedfile in SelectedFiles_valid]
nr_events_epoch_valid = [selectedfile["nr_events_epoch"] for selectedfile in SelectedFiles_valid]
rtdc_path_valid = [selectedfile["rtdc_path"] for selectedfile in SelectedFiles_valid]
zoom_factors_valid = [selectedfile["zoom_factor"] for selectedfile in SelectedFiles_valid]
#zoom_order = [self.actionOrder0.isChecked(),self.actionOrder1.isChecked(),self.actionOrder2.isChecked(),self.actionOrder3.isChecked(),self.actionOrder4.isChecked(),self.actionOrder5.isChecked()]
#zoom_order = int(np.where(np.array(zoom_order)==True)[0])
zoom_order = int(self.comboBox_zoomOrder.currentIndex()) #the combobox-index is already the zoom order
shuffle_valid = [selectedfile["shuffle"] for selectedfile in SelectedFiles_valid]
xtra_in = set([selectedfile["xtra_in"] for selectedfile in SelectedFiles_valid])
if len(xtra_in)>1:# both False and True are present. Not supported
print("Xtra data is used only for some files. Xtra data needs to be used either by all or by none!")
return
xtra_in = list(xtra_in)[0]#this is either True or False
############Cropping#####################
percDataV = float(self.popup_lrfinder_ui.doubleSpinBox_percDataV.value())
percDataV = percDataV/100.0
X_valid,y_valid,Indices,xtra_valid = [],[],[],[]
for i in range(len(SelectedFiles_valid)):
if len(self.ram)==0:#if there is no data available on ram
#replace=true means individual cells could occur several times
gen_valid = aid_img.gen_crop_img(crop,rtdc_path_valid[i],int(np.rint(percDataV*nr_events_epoch_valid[i])),random_images=shuffle_valid[i],replace=True,zoom_factor=zoom_factors_valid[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode,xtra_in=xtra_in)
else:#get a similar generator, using the ram-data
gen_valid = aid_img.gen_crop_img_ram(self.ram,rtdc_path_valid[i],int(np.rint(percDataV*nr_events_epoch_valid[i])),random_images=shuffle_valid[i],replace=True,xtra_in=xtra_in) #Replace true means that individual cells could occur several times
if self.actionVerbose.isChecked():
print("Loaded data from RAM")
generator_cropped_out = next(gen_valid)
X_valid.append(generator_cropped_out[0])
#y_valid.append(np.repeat(indices_valid[i],nr_events_epoch_valid[i]))
y_valid.append(np.repeat(indices_valid[i],X_valid[-1].shape[0]))
Indices.append(generator_cropped_out[1])
xtra_valid.append(generator_cropped_out[2])
del generator_cropped_out
X_valid = np.concatenate(X_valid)
y_valid = np.concatenate(y_valid)
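#One-hot encode the integer class labels into an (n_samples, nr_classes) matrix, as required by
#categorical_crossentropy-style losses.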
Y_valid = np_utils.to_categorical(y_valid, nr_classes)# * 2 - 1
xtra_valid = np.concatenate(xtra_valid)
if len(X_valid.shape)==4:
channels=3
elif len(X_valid.shape)==3:
channels=1
else:
print("Invalid data dimension:" +str(X_valid.shape))
if channels==1:
#Add the "channels" dimension
X_valid = np.expand_dims(X_valid,3)
if norm == "StdScaling using mean and std of all training data":
X_valid = aid_img.image_normalization(X_valid,norm,mean_trainingdata,std_trainingdata)
else:
X_valid = aid_img.image_normalization(X_valid,norm)
#Validation data can be cropped to final size already since no augmentation
#will happen on this data set
dim_val = X_valid.shape
print("Current dim. of validation set (pixels x pixels) = "+str(dim_val[2]))
if dim_val[2]!=crop:
print("Change dim. (pixels x pixels) of validation set to = "+str(crop))
remove = int(dim_val[2]/2.0 - crop/2.0)
X_valid = X_valid[:,remove:remove+crop,remove:remove+crop,:] #crop to crop x crop pixels #TensorFlow
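#Illustrative example: with dim_val[2]=64 and crop=32, remove=16 and the slice keeps the central
#32x32 pixels (rows/columns 16..47).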
if xtra_in==True:
print("Add Xtra Data to X_valid")
X_valid = [X_valid,xtra_valid]
###################Load training data####################
#####################and perform#########################
##################Image augmentation#####################
#Rotating could create edge effects. Avoid this by making crop a bit larger for now
#Worst case would be a 45-degree rotation:
cropsize2 = np.sqrt(crop**2+crop**2)
cropsize2 = np.ceil(cropsize2 / 2.) * 2 #round to the next even number
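#Illustrative example: crop=32 -> sqrt(32**2+32**2) ~= 45.25 -> rounded up to the next even number = 46,
#so rotation can be applied before the final center-crop without introducing empty corners.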
#Should only a certain percentage of the numbers given in the table be sampled?
percDataT = float(self.popup_lrfinder_ui.doubleSpinBox_percDataT.value())
percDataT = percDataT/100.0
X_train,y_train,xtra_train = [],[],[]
t3 = time.time()
for i in range(len(SelectedFiles_train)):
if len(self.ram)==0:
#Replace true means that individual cells could occur several times
gen_train = aid_img.gen_crop_img(cropsize2,rtdc_path_train[i],int(np.rint(percDataT*nr_events_epoch_train[i])),random_images=shuffle_train[i],replace=True,zoom_factor=zoom_factors_train[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode,xtra_in=xtra_in)
else:
gen_train = aid_img.gen_crop_img_ram(self.ram,rtdc_path_train[i],int(np.rint(percDataT*nr_events_epoch_train[i])),random_images=shuffle_train[i],replace=True,xtra_in=xtra_in) #Replace true means that individual cells could occur several times
if self.actionVerbose.isChecked():
print("Loaded data from RAM")
data_ = next(gen_train)
X_train.append(data_[0])
y_train.append(np.repeat(indices_train[i],X_train[-1].shape[0]))
if xtra_in==True:
xtra_train.append(data_[2])
del data_
X_train = np.concatenate(X_train)
X_train = X_train.astype(np.uint8)
y_train = np.concatenate(y_train)
if xtra_in==True:
print("Retrieve Xtra Data...")
xtra_train = np.concatenate(xtra_train)
t4 = time.time()
if verbose == 1:
print("Time to load data (from .rtdc or RAM) and crop="+str(t4-t3))
if len(X_train.shape)==4:
channels=3
elif len(X_train.shape)==3:
channels=1
else:
print("Invalid data dimension:" +str(X_train.shape))
if channels==1:
#Add the "channels" dimension
X_train = np.expand_dims(X_train,3)
t3 = time.time()
#Affine augmentation
X_train = aid_img.affine_augm(X_train,v_flip,h_flip,rotation,width_shift,height_shift,zoom,shear) #Affine image augmentation
y_train = np.copy(y_train)
Y_train = np_utils.to_categorical(y_train, nr_classes)# * 2 - 1
t4 = time.time()
if verbose == 1:
print("Time to perform affine augmentation ="+str(t4-t3))
t3 = time.time()
#Now do the final cropping to the actual size that was set by user
dim = X_train.shape
if dim[2]!=crop:
remove = int(dim[2]/2.0 - crop/2.0)
X_train = X_train[:,remove:remove+crop,remove:remove+crop,:] #crop to crop x crop pixels #TensorFlow
t4 = time.time()
#X_train = np.copy(X_train) #save into new array and do some iterations with varying noise/brightness
#reuse this X_batch_orig a few times since this augmentation was costly
if self.actionVerbose.isChecked()==True:
verbose = 1
else:
verbose = 0
#In each iteration, start with non-augmented data
#X_batch = np.copy(X_batch_orig)#copy from X_batch_orig, X_batch will be altered without altering X_batch_orig
#X_train = X_train.astype(np.uint8)
##########Contrast/Saturation/Hue augmentation#########
#is there any of contrast/saturation/hue augmentation to do?
X_train = X_train.astype(np.uint8)
if contrast_on:
t_con_aug_1 = time.time()
X_train = aid_img.contrast_augm_cv2(X_train,contrast_lower,contrast_higher) #this function is almost 15 times faster than random_contrast from tf!
t_con_aug_2 = time.time()
if verbose == 1:
print("Time to augment contrast="+str(t_con_aug_2-t_con_aug_1))
if saturation_on or hue_on:
t_sat_aug_1 = time.time()
X_train = aid_img.satur_hue_augm_cv2(X_train.astype(np.uint8),saturation_on,saturation_lower,saturation_higher,hue_on,hue_delta) #Gray and RGB; both values >0!
t_sat_aug_2 = time.time()
if verbose == 1:
print("Time to augment saturation/hue="+str(t_sat_aug_2-t_sat_aug_1))
##########Average/Gauss/Motion blurring#########
#is there any of blurring to do?
if avgBlur_on:
t_avgBlur_1 = time.time()
X_train = aid_img.avg_blur_cv2(X_train,avgBlur_min,avgBlur_max)
t_avgBlur_2 = time.time()
if verbose == 1:
print("Time to perform average blurring="+str(t_avgBlur_2-t_avgBlur_1))
if gaussBlur_on:
t_gaussBlur_1 = time.time()
X_train = aid_img.gauss_blur_cv(X_train,gaussBlur_min,gaussBlur_max)
t_gaussBlur_2 = time.time()
if verbose == 1:
print("Time to perform gaussian blurring="+str(t_gaussBlur_2-t_gaussBlur_1))
if motionBlur_on:
t_motionBlur_1 = time.time()
X_train = aid_img.motion_blur_cv(X_train,motionBlur_kernel,motionBlur_angle)
t_motionBlur_2 = time.time()
if verbose == 1:
print("Time to perform motion blurring="+str(t_motionBlur_2-t_motionBlur_1))
##########Brightness noise#########
t3 = time.time()
X_train = aid_img.brightn_noise_augm_cv2(X_train,brightness_add_lower,brightness_add_upper,brightness_mult_lower,brightness_mult_upper,gaussnoise_mean,gaussnoise_scale)
t4 = time.time()
if verbose == 1:
print("Time to augment brightness="+str(t4-t3))
t3 = time.time()
if norm == "StdScaling using mean and std of all training data":
X_train = aid_img.image_normalization(X_train,norm,mean_trainingdata,std_trainingdata)
else:
X_train = aid_img.image_normalization(X_train,norm)
t4 = time.time()
if verbose == 1:
print("Time to apply normalization="+str(t4-t3))
if verbose == 1:
print("X_train.shape")
print(X_train.shape)
if xtra_in==True:
print("Add Xtra Data to X_train")
X_train = [X_train,xtra_train]
###################################################
###############Actual fitting######################
###################################################
batch_size = int(self.popup_lrfinder_ui.spinBox_batchSize.value())
stepsPerEpoch = int(self.popup_lrfinder_ui.spinBox_stepsPerEpoch.value())
epochs = int(self.popup_lrfinder_ui.spinBox_epochs.value())
start_lr = float(self.popup_lrfinder_ui.lineEdit_startLr.text())
stop_lr = float(self.popup_lrfinder_ui.lineEdit_stopLr.text())
valMetrics = bool(self.popup_lrfinder_ui.checkBox_valMetrics.isChecked())
####################lr_find algorithm####################
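#Learning-rate range test: the model is trained for a few epochs while the learning rate is swept from start_lr to stop_lr; loss/accuracy are recorded along the way and plotted below to help pick a suitable learning rate (range)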
if model_keras_p == None:
lrf = aid_dl.LearningRateFinder(model_keras)
elif model_keras_p != None:
lrf = aid_dl.LearningRateFinder(model_keras_p)
if valMetrics==True:
lrf.find([X_train,Y_train],[X_valid,Y_valid],start_lr,stop_lr,stepsPerEpoch=stepsPerEpoch,batchSize=batch_size,epochs=epochs)
else:
lrf.find([X_train,Y_train],None,start_lr,stop_lr,stepsPerEpoch=stepsPerEpoch,batchSize=batch_size,epochs=epochs)
skipBegin,skipEnd = 10,1
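#skip the first 10 and the very last recorded value; the edges of the sweep are typically noisy and not informative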
self.learning_rates = lrf.lrs[skipBegin:-skipEnd]
self.losses_or = lrf.losses_or[skipBegin:-skipEnd]
self.losses_sm = lrf.losses_sm[skipBegin:-skipEnd]
self.accs_or = lrf.accs_or[skipBegin:-skipEnd]
self.accs_sm = lrf.accs_sm[skipBegin:-skipEnd]
self.val_losses_sm = lrf.val_losses_sm[skipBegin:-skipEnd]
self.val_losses_or = lrf.val_losses_or[skipBegin:-skipEnd]
self.val_accs_sm = lrf.val_accs_sm[skipBegin:-skipEnd]
self.val_accs_or = lrf.val_accs_or[skipBegin:-skipEnd]
# Enable the groupboxes
self.popup_lrfinder_ui.groupBox_singleLr.setEnabled(True)
self.popup_lrfinder_ui.groupBox_LrRange.setEnabled(True)
self.update_lrfind_plot()
def update_lrfind_plot(self):
if not hasattr(self, 'learning_rates'):
return
metric = str(self.popup_lrfinder_ui.comboBox_metric.currentText())
color = self.popup_lrfinder_ui.pushButton_color.palette().button().color()
width = int(self.popup_lrfinder_ui.spinBox_lineWidth.value())
color = list(color.getRgb())
color = tuple(color)
pencolor = pg.mkPen(color, width=width)
smooth = bool(self.popup_lrfinder_ui.checkBox_smooth.isChecked())
try:# try to empty the plot
self.popup_lrfinder_ui.lr_plot.clear()
#self.popup_lrfinder_ui.lr_plot.removeItem(self.lr_line)
except:
pass
if metric=="Loss" and smooth==True:
self.y_values = self.losses_sm
elif metric=="Loss" and smooth==False:
self.y_values = self.losses_or
elif metric=="Loss 1st derivative" and smooth==True:
self.y_values = np.diff(self.losses_sm,n=1)
elif metric=="Loss 1st derivative" and smooth==False:
self.y_values = np.diff(self.losses_or,n=1)
elif metric=="Accuracy" and smooth==True:
self.y_values = self.accs_sm
elif metric=="Accuracy" and smooth==False:
self.y_values = self.accs_or
elif metric=="Accuracy 1st derivative" and smooth==True:
self.y_values = np.diff(self.accs_sm,n=1)
elif metric=="Accuracy 1st derivative" and smooth==False:
self.y_values = np.diff(self.accs_or,n=1)
elif metric=="Val. loss" and smooth==True:
self.y_values = self.val_losses_sm
elif metric=="Val. loss" and smooth==False:
self.y_values = self.val_losses_or
elif metric=="Val. loss 1st derivative" and smooth==True:
self.y_values = np.diff(self.val_losses_sm,n=1)
elif metric=="Val. loss 1st derivative" and smooth==False:
self.y_values = np.diff(self.val_losses_or,n=1)
elif metric=="Val. accuracy" and smooth==True:
self.y_values = self.val_accs_sm
elif metric=="Val. accuracy" and smooth==False:
self.y_values = self.val_accs_or
elif metric=="Val. accuracy 1st derivative" and smooth==True:
self.y_values = np.diff(self.val_accs_sm,n=1)
elif metric=="Val. accuracy 1st derivative" and smooth==False:
self.y_values = np.diff(self.val_accs_or,n=1)
else:
print("The combination of "+str(metric)+" and smooth="+str(smooth)+" is not supported!")
if len(self.learning_rates)==len(self.y_values):
self.lr_line = pg.PlotCurveItem(x=np.log10(self.learning_rates), y=self.y_values,pen=pencolor,name=metric)
elif len(self.learning_rates)-1==len(self.y_values):
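#1st-derivative metrics are one entry shorter (np.diff), so drop the first learning rate to match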
self.lr_line = pg.PlotCurveItem(x=np.log10(self.learning_rates)[1:], y=self.y_values,pen=pencolor,name=metric)
else:
print("No data available. Probably, validation metrics were not computed. Please click Run again.")
return
self.popup_lrfinder_ui.lr_plot.addItem(self.lr_line)
#In case the groupBox_singleLr is already checked, carry out the function:
if self.popup_lrfinder_ui.groupBox_singleLr.isChecked():
self.get_lr_single(on_or_off=True)
#In case the groupBox_LrRange is already checked, carry out the function:
if self.popup_lrfinder_ui.groupBox_LrRange.isChecked():
self.get_lr_range(on_or_off=True)
def get_lr_single(self,on_or_off):
if on_or_off==True: #bool(self.popup_lrfinder_ui.groupBox_LrRange.isChecked()):
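#Suggest a single learning rate: place a draggable vertical line at the position of the minimum of the currently plotted metric (typically the loss); dragging it updates lineEdit_singleLr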
ind = np.argmin(self.y_values)#find location of loss-minimum
mini_x = self.learning_rates[ind]
mini_x = np.log10(mini_x)
pen = pg.mkPen(color="w")
self.lr_single = pg.InfiniteLine(pos=mini_x, angle=90, pen=pen, movable=True)
self.popup_lrfinder_ui.lr_plot.addItem(self.lr_single)
def position_changed():
#where did the user drag the region_linfit to?
new_position = 10**(self.lr_single.value())
self.popup_lrfinder_ui.lineEdit_singleLr.setText(str(new_position))
self.lr_single.sigPositionChangeFinished.connect(position_changed)
if on_or_off==False: #user unchecked the groupbox->remove the InfiniteLine if possible
try:
self.popup_lrfinder_ui.lr_plot.removeItem(self.lr_single)
except:
pass
def get_lr_range(self,on_or_off):
#print(on_or_off)
#start_lr = float(self.popup_lrfinder_ui.lineEdit_startLr.text())
#stop_lr = float(self.popup_lrfinder_ui.lineEdit_stopLr.text())
if on_or_off==True: #bool(self.popup_lrfinder_ui.groupBox_LrRange.isChecked()):
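#Suggest a learning-rate range: from a fixed lower bound of 1e-5 up to the position of the metric minimum; the draggable region writes its bounds to lineEdit_LrMin/lineEdit_LrMax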
start_x = 0.00001
start_x = np.log10(start_x)
ind = np.argmin(self.y_values)#find location of loss-minimum
end_x = self.learning_rates[ind]
end_x = np.log10(end_x)
self.lr_region = pg.LinearRegionItem([start_x, end_x], movable=True)
self.popup_lrfinder_ui.lr_plot.addItem(self.lr_region)
def region_changed():
#where did the user drag the region_linfit to?
new_region = self.lr_region.getRegion()
new_region_left = 10**(new_region[0])
new_region_right = 10**(new_region[1])
self.popup_lrfinder_ui.lineEdit_LrMin.setText(str(new_region_left))
self.popup_lrfinder_ui.lineEdit_LrMax.setText(str(new_region_right))
self.lr_region.sigRegionChangeFinished.connect(region_changed)
if on_or_off==False: #bool(self.popup_lrfinder_ui.groupBox_LrRange.isChecked()):
try:
self.popup_lrfinder_ui.lr_plot.removeItem(self.lr_region)
except:
pass
def action_show_example_imgs(self): #this function is only for the main window
if self.actionVerbose.isChecked()==True:
verbose = 1
else:
verbose = 0
#Get state of the comboboxes!
tr_or_valid = str(self.comboBox_ShowTrainOrValid.currentText())
w_or_wo_augm = str(self.comboBox_ShowWOrWoAug.currentText())
#most of it should be similar to action_fit_model_worker
#Used files go to a separate sheet on the MetaFile.xlsx
SelectedFiles = self.items_clicked_no_rtdc_ds()
#Collect all information about the fitting routine that was user defined
crop = int(self.spinBox_imagecrop.value())
norm = str(self.comboBox_Normalization.currentText())
h_flip = bool(self.checkBox_HorizFlip.isChecked())
v_flip = bool(self.checkBox_VertFlip.isChecked())
rotation = float(self.lineEdit_Rotation.text())
width_shift = float(self.lineEdit_widthShift.text())
height_shift = float(self.lineEdit_heightShift.text())
zoom = float(self.lineEdit_zoomRange.text())
shear = float(self.lineEdit_shearRange.text())
brightness_add_lower = float(self.spinBox_PlusLower.value())
brightness_add_upper = float(self.spinBox_PlusUpper.value())
brightness_mult_lower = float(self.doubleSpinBox_MultLower.value())
brightness_mult_upper = float(self.doubleSpinBox_MultUpper.value())
gaussnoise_mean = float(self.doubleSpinBox_GaussianNoiseMean.value())
gaussnoise_scale = float(self.doubleSpinBox_GaussianNoiseScale.value())
contrast_on = bool(self.checkBox_contrast.isChecked())
contrast_lower = float(self.doubleSpinBox_contrastLower.value())
contrast_higher = float(self.doubleSpinBox_contrastHigher.value())
saturation_on = bool(self.checkBox_saturation.isChecked())
saturation_lower = float(self.doubleSpinBox_saturationLower.value())
saturation_higher = float(self.doubleSpinBox_saturationHigher.value())
hue_on = bool(self.checkBox_hue.isChecked())
hue_delta = float(self.doubleSpinBox_hueDelta.value())
avgBlur_on = bool(self.checkBox_avgBlur.isChecked())
avgBlur_min = int(self.spinBox_avgBlurMin.value())
avgBlur_max = int(self.spinBox_avgBlurMax.value())
gaussBlur_on = bool(self.checkBox_gaussBlur.isChecked())
gaussBlur_min = int(self.spinBox_gaussBlurMin.value())
gaussBlur_max = int(self.spinBox_gaussBlurMax.value())
motionBlur_on = bool(self.checkBox_motionBlur.isChecked())
motionBlur_kernel = str(self.lineEdit_motionBlurKernel.text())
motionBlur_angle = str(self.lineEdit_motionBlurAngle.text())
motionBlur_kernel = tuple(ast.literal_eval(motionBlur_kernel)) #translate string in the lineEdits to a tuple
motionBlur_angle = tuple(ast.literal_eval(motionBlur_angle)) #translate string in the lineEdits to a tuple
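#e.g. ast.literal_eval turns the lineEdit text "3,9" into the tuple (3, 9), which is the format aid_img.motion_blur_cv expects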
paddingMode = str(self.comboBox_paddingMode.currentText())#.lower()
#which index is requested by user:?
req_index = int(self.spinBox_ShowIndex.value())
if tr_or_valid=='Training':
######################Load the Training Data################################
ind = [selectedfile["TrainOrValid"] == "Train" for selectedfile in SelectedFiles]
elif tr_or_valid=='Validation':
ind = [selectedfile["TrainOrValid"] == "Valid" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
SelectedFiles = np.array(SelectedFiles)[ind]
SelectedFiles = list(SelectedFiles)
indices = [selectedfile["class"] for selectedfile in SelectedFiles]
ind = np.where(np.array(indices)==req_index)[0]
if len(ind)<1:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("There is no data for this class available")
msg.setWindowTitle("Class not available")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
indices = list(np.array(indices)[ind])
SelectedFiles = list(np.array(SelectedFiles)[ind])
nr_events_epoch = len(indices)*[10] #[selectedfile["nr_events_epoch"] for selectedfile in SelectedFiles]
rtdc_path = [selectedfile["rtdc_path"] for selectedfile in SelectedFiles]
zoom_factors = [selectedfile["zoom_factor"] for selectedfile in SelectedFiles]
#zoom_order = [self.actionOrder0.isChecked(),self.actionOrder1.isChecked(),self.actionOrder2.isChecked(),self.actionOrder3.isChecked(),self.actionOrder4.isChecked(),self.actionOrder5.isChecked()]
#zoom_order = int(np.where(np.array(zoom_order)==True)[0])
zoom_order = int(self.comboBox_zoomOrder.currentIndex()) #the combobox-index is already the zoom order
shuffle = [selectedfile["shuffle"] for selectedfile in SelectedFiles]
#If the scaling method is "divide by mean and std of the whole training set":
if norm == "StdScaling using mean and std of all training data":
mean_trainingdata,std_trainingdata = [],[]
for i in range(len(SelectedFiles)):
if not self.actionDataToRam.isChecked():
#Replace true means that individual cells could occur several times
gen = aid_img.gen_crop_img(crop,rtdc_path[i],random_images=False,zoom_factor=zoom_factors[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode)
else:
if len(self.ram)==0:
#Replace true means that individual cells could occur several times
gen = aid_img.gen_crop_img(crop,rtdc_path[i],random_images=False,zoom_factor=zoom_factors[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode)
else:
gen = aid_img.gen_crop_img_ram(self.ram,rtdc_path[i],random_images=False) #Replace true means that individual cells could occur several times
if self.actionVerbose.isChecked():
print("Loaded data from RAM")
images = next(gen)[0]
mean_trainingdata.append(np.mean(images))
std_trainingdata.append(np.std(images))
mean_trainingdata = np.mean(np.array(mean_trainingdata))
std_trainingdata = np.mean(np.array(std_trainingdata))
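#Note: the global mean/std of the training set is approximated by averaging the per-file means and stds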
if np.allclose(std_trainingdata,0):
std_trainingdata = 0.0001
print("std_trainingdata was zero and is now set to 0.0001 to avoid div. by zero!")
if self.actionVerbose.isChecked():
print("Used all training data to get mean and std for normalization")
if w_or_wo_augm=='With Augmentation':
###############Continue with training data:augmentation############
#Rotating could create edge effects. Avoid this by making crop a bit larger for now
#Worst case would be a 45degree rotation:
cropsize2 = np.sqrt(crop**2+crop**2)
cropsize2 = np.ceil(cropsize2 / 2.) * 2 #round to the next even number
############Cropping and image augmentation#####################
#Start the first iteration:
X,y = [],[]
for i in range(len(SelectedFiles)):
if not self.actionDataToRam.isChecked():
#Replace true means that individual cells could occur several times
gen = aid_img.gen_crop_img(cropsize2,rtdc_path[i],10,random_images=True,replace=True,zoom_factor=zoom_factors[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode)
else:
if len(self.ram)==0:
#Replace true means that individual cells could occur several times
gen = aid_img.gen_crop_img(cropsize2,rtdc_path[i],10,random_images=True,replace=True,zoom_factor=zoom_factors[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode)
else:
gen = aid_img.gen_crop_img_ram(self.ram,rtdc_path[i],10,random_images=True,replace=True) #Replace true means that individual cells could occur several times
if self.actionVerbose.isChecked():
print("Loaded data from RAM")
try: #When all cells are at the border of the image, the generator will be empty. Avoid a program crash by try/except
X.append(next(gen)[0])
except StopIteration:
print("All events at border of image and discarded")
return
y.append(np.repeat(indices[i],X[-1].shape[0]))
X = np.concatenate(X)
X = X.astype(np.uint8) #make sure we stay in uint8
y = np.concatenate(y)
if len(X.shape)==4:
channels=3
elif len(X.shape)==3:
channels=1
X = np.expand_dims(X,3)#Add the "channels" dimension
else:
print("Invalid data dimension:" +str(X.shape))
X_batch, y_batch = aid_img.affine_augm(X,v_flip,h_flip,rotation,width_shift,height_shift,zoom,shear), y #Affine image augmentation
X_batch = X_batch.astype(np.uint8) #make sure we stay in uint8
#Now do the final cropping to the actual size that was set by user
dim = X_batch.shape
if dim[2]!=crop:
remove = int(dim[2]/2.0 - crop/2.0)
#X_batch = X_batch[:,:,remove:-remove,remove:-remove] #crop to crop x crop pixels #Theano
X_batch = X_batch[:,remove:remove+crop,remove:remove+crop,:] #crop to crop x crop pixels #TensorFlow
##########Contrast/Saturation/Hue augmentation#########
#is there any of contrast/saturation/hue augmentation to do?
if contrast_on:
X_batch = aid_img.contrast_augm_cv2(X_batch,contrast_lower,contrast_higher) #this function is almost 15 times faster than random_contrast from tf!
if saturation_on or hue_on:
X_batch = aid_img.satur_hue_augm_cv2(X_batch.astype(np.uint8),saturation_on,saturation_lower,saturation_higher,hue_on,hue_delta)
##########Average/Gauss/Motion blurring#########
#is there any of blurring to do?
if avgBlur_on:
X_batch = aid_img.avg_blur_cv2(X_batch,avgBlur_min,avgBlur_max)
if gaussBlur_on:
X_batch = aid_img.gauss_blur_cv(X_batch,gaussBlur_min,gaussBlur_max)
if motionBlur_on:
X_batch = aid_img.motion_blur_cv(X_batch,motionBlur_kernel,motionBlur_angle)
X_batch = aid_img.brightn_noise_augm_cv2(X_batch,brightness_add_lower,brightness_add_upper,brightness_mult_lower,brightness_mult_upper,gaussnoise_mean,gaussnoise_scale)
if norm == "StdScaling using mean and std of all training data":
X_batch = aid_img.image_normalization(X_batch,norm,mean_trainingdata,std_trainingdata)
else:
X_batch = aid_img.image_normalization(X_batch,norm)
X = X_batch
if verbose: print("Shape of the shown images is:"+str(X.shape))
elif w_or_wo_augm=='Original image':
############Cropping#####################
X,y = [],[]
for i in range(len(SelectedFiles)):
if not self.actionDataToRam.isChecked():
#Replace true means that individual cells could occur several times
gen = aid_img.gen_crop_img(crop,rtdc_path[i],10,random_images=True,replace=True,zoom_factor=zoom_factors[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode)
else:
if len(self.ram)==0:
#Replace true means that individual cells could occur several times
gen = aid_img.gen_crop_img(crop,rtdc_path[i],10,random_images=True,replace=True,zoom_factor=zoom_factors[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode)
else:
gen = aid_img.gen_crop_img_ram(self.ram,rtdc_path[i],10,random_images=True,replace=True) #Replace true means that individual cells could occur several times
if self.actionVerbose.isChecked():
print("Loaded data from RAM")
try:
X.append(next(gen)[0])
except:
return
y.append(np.repeat(indices[i],X[-1].shape[0]))
X = np.concatenate(X)
y = np.concatenate(y)
if len(X.shape)==4:
channels=3
elif len(X.shape)==3:
channels=1
X = np.expand_dims(X,3) #Add the "channels" dimension
else:
print("Invalid data dimension: " +str(X.shape))
if norm == "StdScaling using mean and std of all training data":
X = aid_img.image_normalization(X,norm,mean_trainingdata,std_trainingdata)
else:
X = aid_img.image_normalization(X,norm)
if verbose: print("Shape of the shown images is: "+str(X.shape))
#Is there already anything shown on the widget?
children = self.widget_ViewImages.findChildren(QtWidgets.QGridLayout)
if len(children)>0: #if there is something, delete it!
for i in reversed(range(self.gridLayout_ViewImages.count())):
widgetToRemove = self.gridLayout_ViewImages.itemAt(i).widget()
widgetToRemove.setParent(None)
widgetToRemove.deleteLater()
else: #else, create a Gridlayout to put the images
self.gridLayout_ViewImages = QtWidgets.QGridLayout(self.widget_ViewImages)
for i in range(5):
if channels==1:
img = X[i,:,:,0] #TensorFlow
if channels==3:
img = X[i,:,:,:] #TensorFlow
#Stretch pixel value to full 8bit range (0-255); only for display
img = img-np.min(img)
fac = np.max(img)
img = (img/fac)*255.0
img = img.astype(np.uint8)
if channels==1:
height, width = img.shape
if channels==3:
height, width, _ = img.shape
# qi=QtGui.QImage(img_zoom.data, width, height,width, QtGui.QImage.Format_Indexed8)
# self.label_image_show = QtWidgets.QLabel(self.widget_ViewImages)
# self.label_image_show.setPixmap(QtGui.QPixmap.fromImage(qi))
# self.gridLayout_ViewImages.addWidget(self.label_image_show, 1,i)
# self.label_image_show.show()
#Use pyqtgraph instead, in order to allow for exporting images
self.image_show = pg.ImageView(self.widget_ViewImages)
self.image_show.show()
if verbose: print("Shape of zoomed image: "+str(img.shape))
if channels==1:
self.image_show.setImage(img.T,autoRange=False)
if channels==3:
self.image_show.setImage(np.swapaxes(img,0,1),autoRange=False)
self.image_show.ui.histogram.hide()
self.image_show.ui.roiBtn.hide()
self.image_show.ui.menuBtn.hide()
self.gridLayout_ViewImages.addWidget(self.image_show, 1,i)
self.widget_ViewImages.show()
def tableWidget_HistoryInfo_pop_dclick(self,item,listindex):
if item is not None:
tableitem = self.fittingpopups_ui[listindex].tableWidget_HistoryInfo_pop.item(item.row(), item.column())
if str(tableitem.text())!="Show saved only":
color = QtGui.QColorDialog.getColor()
if color.getRgb()==(0, 0, 0, 255):#no black!
return
else:
tableitem.setBackground(color)
#self.update_historyplot_pop(listindex)
def action_show_example_imgs_pop(self,listindex): #this function is only for the fitting popup window
#Get state of the comboboxes!
tr_or_valid = str(self.fittingpopups_ui[listindex].comboBox_ShowTrainOrValid_pop.currentText())
w_or_wo_augm = str(self.fittingpopups_ui[listindex].comboBox_ShowWOrWoAug_pop.currentText())
#most of it should be similar to action_fit_model_worker
#Used files go to a separate sheet on the MetaFile.xlsx
SelectedFiles = self.items_clicked_no_rtdc_ds()
#Collect all information about the fitting routine that was user defined
crop = int(self.fittingpopups_ui[listindex].spinBox_imagecrop_pop.value())
norm = str(self.fittingpopups_ui[listindex].comboBox_Normalization_pop.currentText())
h_flip = bool(self.fittingpopups_ui[listindex].checkBox_HorizFlip_pop.isChecked())
v_flip = bool(self.fittingpopups_ui[listindex].checkBox_VertFlip_pop.isChecked())
rotation = float(self.fittingpopups_ui[listindex].lineEdit_Rotation_pop.text())
width_shift = float(self.fittingpopups_ui[listindex].lineEdit_widthShift_pop.text())
height_shift = float(self.fittingpopups_ui[listindex].lineEdit_heightShift_pop.text())
zoom = float(self.fittingpopups_ui[listindex].lineEdit_zoomRange_pop.text())
shear = float(self.fittingpopups_ui[listindex].lineEdit_shearRange_pop.text())
brightness_add_lower = float(self.fittingpopups_ui[listindex].spinBox_PlusLower_pop.value())
brightness_add_upper = float(self.fittingpopups_ui[listindex].spinBox_PlusUpper_pop.value())
brightness_mult_lower = float(self.fittingpopups_ui[listindex].doubleSpinBox_MultLower_pop.value())
brightness_mult_upper = float(self.fittingpopups_ui[listindex].doubleSpinBox_MultUpper_pop.value())
gaussnoise_mean = float(self.fittingpopups_ui[listindex].doubleSpinBox_GaussianNoiseMean_pop.value())
gaussnoise_scale = float(self.fittingpopups_ui[listindex].doubleSpinBox_GaussianNoiseScale_pop.value())
contrast_on = bool(self.fittingpopups_ui[listindex].checkBox_contrast_pop.isChecked())
contrast_lower = float(self.fittingpopups_ui[listindex].doubleSpinBox_contrastLower_pop.value())
contrast_higher = float(self.fittingpopups_ui[listindex].doubleSpinBox_contrastHigher_pop.value())
saturation_on = bool(self.fittingpopups_ui[listindex].checkBox_saturation_pop.isChecked())
saturation_lower = float(self.fittingpopups_ui[listindex].doubleSpinBox_saturationLower_pop.value())
saturation_higher = float(self.fittingpopups_ui[listindex].doubleSpinBox_saturationHigher_pop.value())
hue_on = bool(self.fittingpopups_ui[listindex].checkBox_hue_pop.isChecked())
hue_delta = float(self.fittingpopups_ui[listindex].doubleSpinBox_hueDelta_pop.value())
avgBlur_on = bool(self.fittingpopups_ui[listindex].checkBox_avgBlur_pop.isChecked())
avgBlur_min = int(self.fittingpopups_ui[listindex].spinBox_avgBlurMin_pop.value())
avgBlur_max = int(self.fittingpopups_ui[listindex].spinBox_avgBlurMax_pop.value())
gaussBlur_on = bool(self.fittingpopups_ui[listindex].checkBox_gaussBlur_pop.isChecked())
gaussBlur_min = int(self.fittingpopups_ui[listindex].spinBox_gaussBlurMin_pop.value())
gaussBlur_max = int(self.fittingpopups_ui[listindex].spinBox_gaussBlurMax_pop.value())
motionBlur_on = bool(self.fittingpopups_ui[listindex].checkBox_motionBlur_pop.isChecked())
motionBlur_kernel = str(self.fittingpopups_ui[listindex].lineEdit_motionBlurKernel_pop.text())
motionBlur_angle = str(self.fittingpopups_ui[listindex].lineEdit_motionBlurAngle_pop.text())
motionBlur_kernel = tuple(ast.literal_eval(motionBlur_kernel)) #translate string in the lineEdits to a tuple
motionBlur_angle = tuple(ast.literal_eval(motionBlur_angle)) #translate string in the lineEdits to a tuple
paddingMode = str(self.fittingpopups_ui[listindex].comboBox_paddingMode_pop.currentText()).lower()
#which index is requested by user:?
req_index = int(self.fittingpopups_ui[listindex].spinBox_ShowIndex_pop.value())
if tr_or_valid=='Training':
######################Load the Training Data################################
ind = [selectedfile["TrainOrValid"] == "Train" for selectedfile in SelectedFiles]
elif tr_or_valid=='Validation':
ind = [selectedfile["TrainOrValid"] == "Valid" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
SelectedFiles = np.array(SelectedFiles)[ind]
SelectedFiles = list(SelectedFiles)
indices = [selectedfile["class"] for selectedfile in SelectedFiles]
ind = np.where(np.array(indices)==req_index)[0]
if len(ind)<1:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("There is no data for this class available")
msg.setWindowTitle("Class not available")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
indices = list(np.array(indices)[ind])
SelectedFiles = list(np.array(SelectedFiles)[ind])
nr_events_epoch = len(indices)*[10] #[selectedfile["nr_events_epoch"] for selectedfile in SelectedFiles]
rtdc_path = [selectedfile["rtdc_path"] for selectedfile in SelectedFiles]
zoom_factors = [selectedfile["zoom_factor"] for selectedfile in SelectedFiles]
#zoom_order = [self.actionOrder0.isChecked(),self.actionOrder1.isChecked(),self.actionOrder2.isChecked(),self.actionOrder3.isChecked(),self.actionOrder4.isChecked(),self.actionOrder5.isChecked()]
#zoom_order = int(np.where(np.array(zoom_order)==True)[0])
zoom_order = int(self.comboBox_zoomOrder.currentIndex()) #the combobox-index is already the zoom order
shuffle = [selectedfile["shuffle"] for selectedfile in SelectedFiles]
xtra_in = set([selectedfile["xtra_in"] for selectedfile in SelectedFiles])
if len(xtra_in)>1:# False and True is present. Not supported
print("Xtra data is used only for some files. Xtra data needs to be used either by all or by none!")
return
xtra_in = list(xtra_in)[0]#this is either True or False
#If the scaling method is "divide by mean and std of the whole training set":
if norm == "StdScaling using mean and std of all training data":
mean_trainingdata,std_trainingdata = [],[]
for i in range(len(SelectedFiles)):
if not self.actionDataToRam.isChecked():
gen = aid_img.gen_crop_img(crop,rtdc_path[i],random_images=False,zoom_factor=zoom_factors[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode,xtra_in=xtra_in) #Replace true means that individual cells could occur several times
else:
if len(self.ram)==0:
gen = aid_img.gen_crop_img(crop,rtdc_path[i],random_images=False,zoom_factor=zoom_factors[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode,xtra_in=xtra_in) #Replace true means that individual cells could occur several times
else:
gen = aid_img.gen_crop_img_ram(self.ram,rtdc_path[i],random_images=False,xtra_in=xtra_in) #Replace true means that individual cells could occur several times
if self.actionVerbose.isChecked():
print("Loaded data from RAM")
images = next(gen)[0]
mean_trainingdata.append(np.mean(images))
std_trainingdata.append(np.std(images))
mean_trainingdata = np.mean(np.array(mean_trainingdata))
std_trainingdata = np.mean(np.array(std_trainingdata))
if np.allclose(std_trainingdata,0):
std_trainingdata = 0.0001
print("std_trainingdata turned out to be zero. I set it to 0.0001, to avoid division by zero!")
if self.actionVerbose.isChecked():
print("Used all training data to get mean and std for normalization")
if w_or_wo_augm=='With Augmentation':
###############Continue with training data:augmentation############
#Rotating could create edge effects. Avoid this by making crop a bit larger for now
#Worst case would be a 45degree rotation:
cropsize2 = np.sqrt(crop**2+crop**2)
cropsize2 = np.ceil(cropsize2 / 2.) * 2 #round to the next even number
############Get cropped images with image augmentation#####################
#Start the first iteration:
X,y = [],[]
for i in range(len(SelectedFiles)):
if not self.actionDataToRam.isChecked():
#Replace true means that individual cells could occur several times
gen = aid_img.gen_crop_img(cropsize2,rtdc_path[i],10,random_images=shuffle[i],replace=True,zoom_factor=zoom_factors[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode)
else:
if len(self.ram)==0:
#Replace true means that individual cells could occur several times
gen = aid_img.gen_crop_img(cropsize2,rtdc_path[i],10,random_images=shuffle[i],replace=True,zoom_factor=zoom_factors[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode)
else:
gen = aid_img.gen_crop_img_ram(self.ram,rtdc_path[i],10,random_images=shuffle[i],replace=True) #Replace true means that individual cells could occur several times
if self.actionVerbose.isChecked():
print("Loaded data from RAM")
X.append(next(gen)[0])
#y.append(np.repeat(indices[i],nr_events_epoch[i]))
y.append(np.repeat(indices[i],X[-1].shape[0]))
X = np.concatenate(X)
y = np.concatenate(y)
if len(X.shape)==4:
channels=3
elif len(X.shape)==3:
channels=1
else:
print("Invalid data dimension:" +str(X.shape))
if channels==1:
#Add the "channels" dimension
X = np.expand_dims(X,3)
X_batch, y_batch = aid_img.affine_augm(X,v_flip,h_flip,rotation,width_shift,height_shift,zoom,shear), y #Affine image augmentation
X_batch = X_batch.astype(np.uint8) #make sure we stay in uint8
#Now do the final cropping to the actual size that was set by user
dim = X_batch.shape
if dim[2]!=crop:
remove = int(dim[2]/2.0 - crop/2.0)
#X_batch = X_batch[:,:,remove:-remove,remove:-remove] #crop to crop x crop pixels #Theano
X_batch = X_batch[:,remove:remove+crop,remove:remove+crop,:] #crop to crop x crop pixels #TensorFlow
##########Contrast/Saturation/Hue augmentation#########
#is there any of contrast/saturation/hue augmentation to do?
if contrast_on:
X_batch = aid_img.contrast_augm_cv2(X_batch,contrast_lower,contrast_higher) #this function is almost 15 times faster than random_contrast from tf!
if saturation_on or hue_on:
X_batch = aid_img.satur_hue_augm_cv2(X_batch.astype(np.uint8),saturation_on,saturation_lower,saturation_higher,hue_on,hue_delta)
##########Average/Gauss/Motion blurring#########
#is there any of blurring to do?
if avgBlur_on:
X_batch = aid_img.avg_blur_cv2(X_batch,avgBlur_min,avgBlur_max)
if gaussBlur_on:
X_batch = aid_img.gauss_blur_cv(X_batch,gaussBlur_min,gaussBlur_max)
if motionBlur_on:
X_batch = aid_img.motion_blur_cv(X_batch,motionBlur_kernel,motionBlur_angle)
X_batch = aid_img.brightn_noise_augm_cv2(X_batch,brightness_add_lower,brightness_add_upper,brightness_mult_lower,brightness_mult_upper,gaussnoise_mean,gaussnoise_scale)
if norm == "StdScaling using mean and std of all training data":
X_batch = aid_img.image_normalization(X_batch,norm,mean_trainingdata,std_trainingdata)
else:
X_batch = aid_img.image_normalization(X_batch,norm)
X = X_batch
elif w_or_wo_augm=='Original image':
############Cropping#####################
X,y = [],[]
for i in range(len(SelectedFiles)):
if not self.actionDataToRam.isChecked():
#Replace true means that individual cells could occur several times
gen = aid_img.gen_crop_img(crop,rtdc_path[i],10,random_images=shuffle[i],replace=True,zoom_factor=zoom_factors[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode)
else:
if len(self.ram)==0:
#Replace true means that individual cells could occur several times
gen = aid_img.gen_crop_img(crop,rtdc_path[i],10,random_images=shuffle[i],replace=True,zoom_factor=zoom_factors[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode)
else:
gen = aid_img.gen_crop_img_ram(self.ram,rtdc_path[i],10,random_images=shuffle[i],replace=True) #Replace true means that individual cells could occur several times
if self.actionVerbose.isChecked():
print("Loaded data from RAM")
X.append(next(gen)[0])
#y.append(np.repeat(indices[i],nr_events_epoch[i]))
y.append(np.repeat(indices[i],X[-1].shape[0]))
X = np.concatenate(X)
y = np.concatenate(y)
if len(X.shape)==4:
channels = 3
elif len(X.shape)==3:
channels = 1
X = np.expand_dims(X,3)#Add the "channels" dimension
else:
print("Invalid data dimension:" +str(X.shape))
if norm == "StdScaling using mean and std of all training data":
X = aid_img.image_normalization(X,norm,mean_trainingdata,std_trainingdata)
else:
X = aid_img.image_normalization(X,norm)
#Is there already anything shown on the widget?
children = self.fittingpopups_ui[listindex].widget_ViewImages_pop.findChildren(QtWidgets.QGridLayout)
if len(children)>0: #if there is something, delete it!
for i in reversed(range(self.fittingpopups_ui[listindex].gridLayout_ViewImages_pop.count())):
widgetToRemove = self.fittingpopups_ui[listindex].gridLayout_ViewImages_pop.itemAt(i).widget()
widgetToRemove.setParent(None)
widgetToRemove.deleteLater()
else: #else, create a Gridlayout to put the images
self.fittingpopups_ui[listindex].gridLayout_ViewImages_pop = QtWidgets.QGridLayout(self.fittingpopups_ui[listindex].widget_ViewImages_pop)
for i in range(5):
if channels==1:
img = X[i,:,:,0]
if channels==3:
img = X[i,:,:,:]
#Normalize image to full 8bit range (from 0 to 255)
img = img-np.min(img)
fac = np.max(img)
img = (img/fac)*255.0
img = img.astype(np.uint8)
# height, width = img_zoom.shape
# qi=QtGui.QImage(img_zoom.data, width, height,width, QtGui.QImage.Format_Indexed8)
# self.label_image_show = QtWidgets.QLabel(self.widget_ViewImages)
# self.label_image_show.setPixmap(QtGui.QPixmap.fromImage(qi))
# self.gridLayout_ViewImages_pop.addWidget(self.label_image_show, 1,i)
# self.label_image_show.show()
#Use pyqtgraph instead, in order to allow for exporting images
self.fittingpopups_ui[listindex].image_show_pop = pg.ImageView(self.fittingpopups_ui[listindex].widget_ViewImages_pop)
self.fittingpopups_ui[listindex].image_show_pop.show()
if channels==1:
self.fittingpopups_ui[listindex].image_show_pop.setImage(img.T,autoRange=False)
if channels==3:
self.fittingpopups_ui[listindex].image_show_pop.setImage(np.swapaxes(img,0,1),autoRange=False)
self.fittingpopups_ui[listindex].image_show_pop.ui.histogram.hide()
self.fittingpopups_ui[listindex].image_show_pop.ui.roiBtn.hide()
self.fittingpopups_ui[listindex].image_show_pop.ui.menuBtn.hide()
self.fittingpopups_ui[listindex].gridLayout_ViewImages_pop.addWidget(self.fittingpopups_ui[listindex].image_show_pop, 1,i)
self.fittingpopups_ui[listindex].widget_ViewImages_pop.show()
def get_color_mode(self):
if str(self.comboBox_GrayOrRGB.currentText())=="Grayscale":
return "Grayscale"
elif str(self.comboBox_GrayOrRGB.currentText())=="RGB":
return "RGB"
else:
return None
def checkBox_rollingMedian_statechange(self,item):#used in frontend
self.horizontalSlider_rollmedi.setEnabled(item)
def update_historyplot(self):
#After loading a history, there are checkboxes available. Check, if user checked some:
colcount = self.tableWidget_HistoryItems.columnCount()
#Collect items that are checked
selected_items = []
Colors = []
for colposition in range(colcount):
#get checkbox item and; is it checked?
cb = self.tableWidget_HistoryItems.item(0, colposition)
if not cb==None:
if cb.checkState() == QtCore.Qt.Checked:
selected_items.append(str(cb.text()))
Colors.append(cb.background())
#Get a list of the color from the background of the table items
DF1 = self.loaded_history
#Clear the plot
self.widget_Scatterplot.clear()
#Add plot
self.plt1 = self.widget_Scatterplot.addPlot()
self.plt1.showGrid(x=True,y=True)
self.plt1.addLegend()
self.plt1.setLabel('bottom', 'Epoch', units='')
self.plot_rollmedis = [] #list for plots of rolling medians
if "Show saved only" in selected_items:
#nr_of_selected_items = len(selected_items)-1
#get the "Saved" column from DF1
saved = DF1["Saved"]
saved = np.where(np.array(saved==1))[0]
# else:
# nr_of_selected_items = len(selected_items)
self.Colors = Colors
scatter_x,scatter_y = [],[]
for i in range(len(selected_items)):
key = selected_items[i]
if key!="Show saved only":
df = DF1[key]
epochs = range(len(df))
win = int(self.horizontalSlider_rollmedi.value())
rollmedi = df.rolling(window=win).median()
if "Show saved only" in selected_items:
df = np.array(df)[saved]
epochs = np.array(epochs)[saved]
rollmedi = pd.DataFrame(df).rolling(window=win).median()
scatter_x.append(epochs)
scatter_y.append(df)
color = self.Colors[i]
pen_rollmedi = list(color.color().getRgb())
pen_rollmedi = pg.mkColor(pen_rollmedi)
pen_rollmedi = pg.mkPen(color=pen_rollmedi,width=6)
color = list(color.color().getRgb())
color[-1] = int(0.6*color[-1])
color = tuple(color)
pencolor = pg.mkColor(color)
brush = pg.mkBrush(color=pencolor)
self.plt1.plot(epochs, df,pen=None,symbol='o',symbolPen=None,symbolBrush=brush,name=key,clear=False)
if bool(self.checkBox_rollingMedian.isChecked()):#Should a rolling median be plotted?
try:
rollmedi = np.array(rollmedi).reshape(rollmedi.shape[0])
rm = self.plt1.plot(np.array(epochs), rollmedi,pen=pen_rollmedi,clear=False)
self.plot_rollmedis.append(rm)
except Exception as e:
#There is an issue for the rolling median plotting!
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Warning)
msg.setText(str(e)+"\n->There are likely too few points to have a rolling median with such a window size ("+str(round(win))+")")
msg.setWindowTitle("Error occured when plotting rolling median:")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
if len(str(self.lineEdit_LoadHistory.text()))==0:
#if DF1==None:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Please load History file first (.meta)")
msg.setWindowTitle("No History file loaded")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
if len(scatter_x)==0:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Please select at least one of " +"\n".join(list(DF1.keys())))
msg.setWindowTitle("No quantity selected")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#Keep the information as lists available for this function
self.scatter_x_l, self.scatter_y_l = scatter_x,scatter_y
if bool(self.checkBox_linearFit.isChecked()):
#Put a linear region on the plot; cover the last 10% of points
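#for short histories (max epoch < 12) the region covers the whole x-range, otherwise only the last 10% of the epochs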
if np.max(np.concatenate(scatter_x))<12:
start_x = 0
end_x = np.max(np.concatenate(scatter_x))+1
else:
start_x = int(0.9*np.max(np.concatenate(scatter_x)))
end_x = int(1.0*np.max(np.concatenate(scatter_x)))
self.region_linfit = pg.LinearRegionItem([start_x, end_x], bounds=[-np.inf,np.inf], movable=True)
self.plt1.addItem(self.region_linfit)
def region_changed():
try: #clear the plot from other fits if there are any
if len(self.plot_fits)>0:
for i in range(len(self.plot_fits)):
self.plt1.legend.removeItem(self.names[i])
self.plt1.removeItem(self.plot_fits[i])
except:
pass
#where did the user drag the region_linfit to?
new_region = self.region_linfit.getRegion()
#for each curve, do a linear regression
self.plot_fits,self.names = [], []
for i in range(len(self.scatter_x_l)):
scatter_x_vals = np.array(self.scatter_x_l[i])
ind = np.where( (scatter_x_vals<new_region[1]) & (scatter_x_vals>new_region[0]) )
scatter_x_vals = scatter_x_vals[ind]
scatter_y_vals = np.array(self.scatter_y_l[i])[ind]
if len(scatter_x_vals)>1:
fit = np.polyfit(scatter_x_vals,scatter_y_vals,1)
fit_y = fit[0]*scatter_x_vals+fit[1]
pencolor = pg.mkColor(self.Colors[i].color())
pen = pg.mkPen(color=pencolor,width=6)
text = 'y='+("{:.2e}".format(fit[0]))+"x + " +("{:.2e}".format(fit[1]))
self.names.append(text)
self.plot_fits.append(self.plt1.plot(name=text))
self.plot_fits[i].setData(scatter_x_vals,fit_y,pen=pen,clear=False,name=text)
self.region_linfit.sigRegionChangeFinished.connect(region_changed)
def slider_changed():
if bool(self.checkBox_rollingMedian.isChecked()):
#remove other rolling median lines:
for i in range(len(self.plot_rollmedis)):
self.plt1.removeItem(self.plot_rollmedis[i])
#Start with fresh list
self.plot_rollmedis = []
win = int(self.horizontalSlider_rollmedi.value())
for i in range(len(self.scatter_x_l)):
epochs = np.array(self.scatter_x_l[i])
if type(self.scatter_y_l[i]) == pd.core.frame.DataFrame:
rollmedi = self.scatter_y_l[i].rolling(window=win).median()
else:
rollmedi = pd.DataFrame(self.scatter_y_l[i]).rolling(window=win).median()
rollmedi = np.array(rollmedi).reshape(rollmedi.shape[0])
pencolor = pg.mkColor(self.Colors[i].color())
pen_rollmedi = pg.mkPen(color=pencolor,width=6)
rm = self.plt1.plot(np.array(epochs), rollmedi,pen=pen_rollmedi,clear=False)
self.plot_rollmedis.append(rm)
self.horizontalSlider_rollmedi.sliderMoved.connect(slider_changed)
scatter_x = np.concatenate(scatter_x)
scatter_y = np.concatenate(scatter_y)
scatter_x_norm = (scatter_x.astype(float))/float(np.max(scatter_x))
scatter_y_norm = (scatter_y.astype(float))/float(np.max(scatter_y))
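#normalize both axes to [0,1] so the nearest-point search in onClick below is not dominated by the axis with the larger numeric range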
self.model_was_selected_before = False
def onClick(event):
#Get all plotting items
#if len(self.plt1.listDataItems())==nr_of_selected_items+1:
#delete the last item if the user already selected one:
if self.model_was_selected_before:
self.plt1.removeItem(self.plt1.listDataItems()[-1])
items = self.widget_Scatterplot.scene().items(event.scenePos())
#get the index of the viewbox
isviewbox = [type(item)==pg.graphicsItems.ViewBox.ViewBox for item in items]
index = np.where(np.array(isviewbox)==True)[0]
vb = np.array(items)[index]
try: #when the user rescaled the view and clicks somewhere outside, an IndexError could appear.
clicked_x = float(vb[0].mapSceneToView(event.scenePos()).x())
clicked_y = float(vb[0].mapSceneToView(event.scenePos()).y())
except:
return
try:
a1 = (clicked_x)/float(np.max(scatter_x))
a2 = (clicked_y)/float(np.max(scatter_y))
except Exception as e:
print(str(e))
return
#Which is the closest scatter point?
dist = np.sqrt(( a1-scatter_x_norm )**2 + ( a2-scatter_y_norm )**2)
index = np.argmin(dist)
clicked_x = scatter_x[index]
clicked_y = scatter_y[index]
#Update the spinBox
#self.spinBox_ModelIndex.setValue(int(clicked_x))
#Modelindex for textBrowser_SelectedModelInfo
text_index = "\nModelindex: "+str(clicked_x)
#Indicate the selected model on the scatter plot
self.plt1.plot([clicked_x], [clicked_y],pen=None,symbol='o',symbolPen='w',clear=False)
#Get more information about this model
Modelname = str(self.loaded_para["Modelname"].iloc[0])
path, filename = os.path.split(Modelname)
filename = filename.split(".model")[0]+"_"+str(clicked_x)+".model"
path = os.path.join(path,filename)
if os.path.isfile(path):
text_path = "\nFile is located in:"+path
else:
text_path = "\nFile not found!:"+path+"\nProbably the .model was deleted or not saved"
text_acc = str(DF1.iloc[clicked_x])
self.textBrowser_SelectedModelInfo.setText("Loaded model: "+filename+text_index+text_path+"\nPerformance:\n"+text_acc)
self.model_was_selected_before = True
self.model_2_convert = path
self.widget_Scatterplot.scene().sigMouseClicked.connect(onClick)
def action_load_history(self):
filename = QtWidgets.QFileDialog.getOpenFileName(self, 'Open meta-data', Default_dict["Path of last model"],"AIDeveloper Meta file (*meta.xlsx)")
filename = filename[0]
if not filename.endswith("meta.xlsx"):
return
if not os.path.isfile(filename):
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("File not found")
msg.setWindowTitle("File not found")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
self.lineEdit_LoadHistory.setText(filename)
self.action_plot_history(filename)
def action_load_history_current(self):
if self.model_keras_path==None:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("There is no fitting going on")
msg.setWindowTitle("No current fitting process!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
history_path = self.model_keras_path
if type(history_path)==list:#collection=True
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Not implemented for collections. Please use 'Load History' button to specify a single .meta file")
msg.setWindowTitle("Not implemented for collecitons")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
filename = history_path.split("_0.model")[0]+"_meta.xlsx"
if not filename.endswith("meta.xlsx"):
return
if not os.path.isfile(filename):
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("File not found")
msg.setWindowTitle("File not found")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
self.lineEdit_LoadHistory.setText(filename)
self.action_plot_history(filename)
def action_plot_history(self,filename):
#If there is a file, it can happen that fitting is currently going on
#and with bad luck AID just tries to write to the file. This would cause a crash.
#Therefore, first try to copy the file to a temporary folder. If that fails,
#wait 1.5 seconds and try again
#There needs to be a "temp" folder. If there is none, create it!
#does temp exist?
tries = 0 #during fitting, AID sometimes wants to write to the history file. In this case we can't read it
try:
while tries<15:#try a few times
try:
temp_path = aid_bin.create_temp_folder()#create a temp folder if it does not already exist
#Create a random filename for a temp. file
someletters = list("STERNBURGPILS")
temporaryfile = np.random.choice(someletters,5,replace=True)
temporaryfile = "".join(temporaryfile)+".xlsx"
temporaryfile = os.path.join(temp_path,temporaryfile)
shutil.copyfile(filename,temporaryfile) #copy the original excel file there
dic = pd.read_excel(temporaryfile,sheet_name='History',index_col=0) #open it there
self.loaded_history = dic
para = pd.read_excel(temporaryfile,sheet_name='Parameters')
print(temporaryfile)
#delete the tempfile
os.remove(temporaryfile)
self.loaded_para = para
tries = 16
except:
time.sleep(1.5)
tries+=1
except Exception as e:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(e))
msg.setWindowTitle("Error")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#Check if dic exists now
try:
keys = list(dic.keys())
except Exception as e:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(e))
msg.setWindowTitle("Error")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#Remember the path for next time
Default_dict["Path of last model"] = os.path.split(filename)[0]
aid_bin.save_aid_settings(Default_dict)
#sort the list alphabetically
keys_ = [l.lower() for l in keys]
ind_sort = np.argsort(keys_)
keys = list(np.array(keys)[ind_sort])
#First keys should always be acc,loss,val_acc,val_loss -in this order
keys_first = ["acc","loss","val_acc","val_loss"]
for i in range(len(keys_first)):
if keys_first[i] in keys:
ind = np.where(np.array(keys)==keys_first[i])[0][0]
if ind!=i:
del keys[ind]
keys.insert(i,keys_first[i])
#Lastly check if there is "Saved" or "Time" present and shift it to the back
keys_last = ["Saved","Time"]
for i in range(len(keys_last)):
if keys_last[i] in keys:
ind = np.where(np.array(keys)==keys_last[i])[0][0]
if ind!=len(keys):
del keys[ind]
keys.append(keys_last[i])
self.tableWidget_HistoryItems.setColumnCount(len(keys)+1) #+1 because of "Show saved only"
#for each key, put a checkbox on the tableWidget_HistoryInfo_pop
rowPosition = self.tableWidget_HistoryItems.rowCount()
if rowPosition==0:
self.tableWidget_HistoryItems.insertRow(0)
else:
rowPosition=0
for columnPosition in range(len(keys)):#(2,4):
key = keys[columnPosition]
item = QtWidgets.QTableWidgetItem(str(key))#("item {0} {1}".format(rowNumber, columnNumber))
item.setBackground(QtGui.QColor(self.colorsQt[columnPosition]))
item.setFlags( QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsEnabled )
item.setCheckState(QtCore.Qt.Unchecked)
self.tableWidget_HistoryItems.setItem(rowPosition, columnPosition, item)
#One checkbox at the end to switch on/of to show only the models that are saved
columnPosition = len(keys)
item = QtWidgets.QTableWidgetItem("Show saved only")#("item {0} {1}".format(rowNumber, columnNumber))
item.setFlags( QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsEnabled )
item.setCheckState(QtCore.Qt.Unchecked)
self.tableWidget_HistoryItems.setItem(rowPosition, columnPosition, item)
self.tableWidget_HistoryItems.resizeColumnsToContents()
self.tableWidget_HistoryItems.resizeRowsToContents()
def history_tab_get_model_path(self):#Let user define a model he would like to convert
#pushButton_LoadModel
#Open a QFileDialog
filepath = QtWidgets.QFileDialog.getOpenFileName(self, 'Select a trained model you want to convert', Default_dict["Path of last model"],"Keras Model file (*.model)")
filepath = filepath[0]
if os.path.isfile(filepath):
self.model_2_convert = filepath
path, filename = os.path.split(filepath)
try:
modelindex = filename.split(".model")[0]
modelindex = int(modelindex.split("_")[-1])
except:
modelindex = np.nan
self.textBrowser_SelectedModelInfo.setText("Error loading model")
return
text = "Loaded model: "+filename+"\nModelindex: "+str(modelindex)+"\nFile is located in: "+filepath
self.textBrowser_SelectedModelInfo.setText(text)
def history_tab_convertModel(self):
#Check if there is text in textBrowser_SelectedModelInfo
path = self.model_2_convert
try:
os.path.isfile(path)
except:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("No file defined!")
msg.setWindowTitle("No file defined!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
if not os.path.isfile(path):
#text_path = "\nFile not found!:"+path+"\nProbably the .model was deleted or not saved"
#self.pushButton_convertModel.setEnabled(False)
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("\nFile not found!:"+path+"\nProbably the .model was deleted or not saved")
msg.setWindowTitle("File not found")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#If the source format is Keras TensorFlow:
source_format = str(self.combobox_initial_format.currentText())
target_format = str(self.comboBox_convertTo.currentText()) #What is the target format?
##TODO: All conversion methods to multiprocessing functions!
def conversion_successful_msg(text):#Show a message box informing the user that the conversion finished
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText(text)
msg.setWindowTitle("Successfully converted model!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
##################Keras TensorFlow -> .nnet############################
if target_format==".nnet" and source_format=="Keras TensorFlow":
ConvertToNnet = 1
worker = Worker(self.history_tab_convertModel_nnet_worker,ConvertToNnet)
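#run the conversion in a background thread (QThreadPool) so the GUI stays responsive; the loaded Keras model and the success notification come back via the worker's history signal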
def get_model_keras_from_worker(dic):
self.model_keras = dic["model_keras"]
worker.signals.history.connect(get_model_keras_from_worker)
def conversion_successful(i):#Enable the Convert to .nnet button
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
text = "Conversion Keras TensorFlow -> .nnet done"
msg.setText(text)
msg.setWindowTitle("Successfully converted model!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
#self.pushButton_convertModel.setEnabled(True)
worker.signals.history.connect(conversion_successful)
self.threadpool.start(worker)
##################Keras TensorFlow -> Frozen .pb#######################
elif target_format=="Frozen TensorFlow .pb" and source_format=="Keras TensorFlow":
#target filename should be like source +_frozen.pb
path_new = os.path.splitext(path)[0] + "_frozen.pb"
aid_dl.convert_kerastf_2_frozen_pb(path,path_new)
text = "Conversion Keras TensorFlow -> Frozen .pb is done"
conversion_successful_msg(text)
##################Keras TensorFlow -> Optimized .pb####################
elif target_format=="Optimized TensorFlow .pb" and source_format=="Keras TensorFlow":
path_new = os.path.splitext(path)[0] + "_optimized.pb"
aid_dl.convert_kerastf_2_optimized_pb(path,path_new)
text = "Conversion Keras TensorFlow -> Optimized .pb is done"
conversion_successful_msg(text)
####################Frozen -> Optimized .pb############################
elif target_format=="Optimized TensorFlow .pb" and source_format=="Frozen TensorFlow .pb":
path_new = os.path.splitext(path)[0] + "_optimized.pb"
aid_dl.convert_frozen_2_optimized_pb(path,path_new)
text = "Conversion Frozen -> Optimized .pb is done"
conversion_successful_msg(text)
##################Keras TensorFlow -> ONNX####################
elif target_format=="ONNX (via keras2onnx)" and source_format=="Keras TensorFlow":
path_new = os.path.splitext(path)[0] + ".onnx"
aid_dl.convert_kerastf_2_onnx(path,path_new)
text = "Conversion Keras TensorFlow -> ONNX (via keras2onnx) is done"
conversion_successful_msg(text)
##################Keras TensorFlow -> ONNX via MMdnn####################
elif target_format=="ONNX (via MMdnn)" and source_format=="Keras TensorFlow":
aid_dl.convert_kerastf_2_onnx_mmdnn(path)
text = "Conversion Keras TensorFlow -> ONNX (via MMdnn) is done"
conversion_successful_msg(text)
##################Keras TensorFlow -> PyTorch Script####################
elif target_format=="PyTorch Script" and source_format=="Keras TensorFlow":
aid_dl.convert_kerastf_2_script(path,"pytorch")
text = "Conversion Keras TensorFlow -> PyTorch Script is done. You can now use this script and the saved weights to build the model using your PyTorch installation."
conversion_successful_msg(text)
##################Keras TensorFlow -> Caffe Script####################
elif target_format=="Caffe Script" and source_format=="Keras TensorFlow":
aid_dl.convert_kerastf_2_script(path,"caffe")
text = "Conversion Keras TensorFlow -> Caffe Script is done. You can now use this script and the saved weights to build the model using your Caffe installation."
conversion_successful_msg(text)
##################Keras TensorFlow -> CNTK Script####################
elif target_format=="CNTK Script" and source_format=="Keras TensorFlow":
aid_dl.convert_kerastf_2_script(path,"cntk")
text = "Conversion Keras TensorFlow -> CNTK Script is done. You can now use this script and the saved weights to build the model using your CNTK installation."
conversion_successful_msg(text)
##################Keras TensorFlow -> mxnet Script####################
elif target_format=="MXNet Script" and source_format=="Keras TensorFlow":
aid_dl.convert_kerastf_2_script(path,"mxnet")
text = "Conversion Keras TensorFlow -> MXNet Script is done. You can now use this script and the saved weights to build the model using your MXNet installation."
conversion_successful_msg(text)
##################Keras TensorFlow -> onnx Script####################
elif target_format=="ONNX Script" and source_format=="Keras TensorFlow":
aid_dl.convert_kerastf_2_script(path,"onnx")
text = "Conversion Keras TensorFlow -> ONNX Script is done. You can now use this script and the saved weights to build the model using your ONNX installation."
conversion_successful_msg(text)
##################Keras TensorFlow -> TensorFlow Script####################
elif target_format=="TensorFlow Script" and source_format=="Keras TensorFlow":
aid_dl.convert_kerastf_2_script(path,"tensorflow")
text = "Conversion Keras TensorFlow -> TensorFlow Script is done. You can now use this script and the saved weights to build the model using your Tensorflow installation."
conversion_successful_msg(text)
##################Keras TensorFlow -> Keras Script####################
elif target_format=="Keras Script" and source_format=="Keras TensorFlow":
aid_dl.convert_kerastf_2_script(path,"keras")
text = "Conversion Keras TensorFlow -> Keras Script is done. You can now use this script and the saved weights to build the model using your Keras installation."
conversion_successful_msg(text)
##################Keras TensorFlow -> CoreML####################
elif "CoreML" in target_format and source_format=="Keras TensorFlow":
aid_dl.convert_kerastf_2_coreml(path)
text = "Conversion Keras TensorFlow -> CoreML is done."
conversion_successful_msg(text)
else:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Not implemeted (yet)")
msg.setWindowTitle("Not implemeted (yet)")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
#If that worked without error, save the filepath for next time
Default_dict["Path of last model"] = os.path.split(path)[0]
aid_bin.save_aid_settings(Default_dict)
def history_tab_convertModel_nnet_worker(self,ConvertToNnet,progress_callback,history_callback):
#Define a new session -> Necessary for threading in TensorFlow
#with tf.Session(graph = tf.Graph(), config=config_gpu) as sess:
with tf.Session() as sess:
path = self.model_2_convert
try:
model_keras = load_model(path,custom_objects=aid_dl.get_custom_metrics())
except:
model_keras = load_model(path)
dic = {"model_keras":model_keras}
history_callback.emit(dic)
progress_callback.emit(1)
if ConvertToNnet==1:
#Since this happened in a thread, TensorFlow can't access it anywhere else
#Therefore perform Conversion to nnet right away:
model_config = model_keras.get_config()#["layers"]
if type(model_config)==dict:
model_config = model_config["layers"]#for keras version>2.2.3, there is a change in the output of get_config()
#Convert model to theano weights format (Only necessary for CNNs)
for layer in model_keras.layers:
if layer.__class__.__name__ in ['Convolution1D', 'Convolution2D']:
original_w = K.get_value(layer.W)
converted_w = convert_kernel(original_w)
K.set_value(layer.W, converted_w)
nnet_path, nnet_filename = os.path.split(self.model_2_convert)
nnet_filename = nnet_filename.split(".model")[0]+".nnet"
out_path = os.path.join(nnet_path,nnet_filename)
aid_dl.dump_to_simple_cpp(model_keras=model_keras,model_config=model_config,output=out_path,verbose=False)
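#Note (sketch/assumption): Keras' convert_kernel flips the spatial dimensions of the
#convolution kernels (roughly w[::-1, ::-1, :, :] for 2D), translating between the
#TensorFlow and Theano kernel conventions expected by the simple C++ .nnet dump.
#The resulting file is written next to the original model, e.g.
#"myModel_3.model" -> "myModel_3.nnet" (derived from the split on ".model" above).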
# sess.close()
# try:
# aid_dl.reset_keras()
# except:
# print("Could not reset Keras (1)")
def history_tab_ConvertToNnet(self):
print("Not used")
# model_keras = self.model_keras
# model_config = model_keras.get_config()["layers"]
# #Convert model to theano weights format (Only necessary for CNNs)
# for layer in model_keras.layers:
# if layer.__class__.__name__ in ['Convolution1D', 'Convolution2D']:
# original_w = K.get_value(layer.W)
# converted_w = convert_kernel(original_w)
# K.set_value(layer.W, converted_w)
#
# nnet_path, nnet_filename = os.path.split(self.model_2_convert)
# nnet_filename = nnet_filename.split(".model")[0]+".nnet"
# out_path = os.path.join(nnet_path,nnet_filename)
# aid_dl.dump_to_simple_cpp(model_keras=model_keras,model_config=model_config,output=out_path,verbose=False)
# msg = QtWidgets.QMessageBox()
# msg.setIcon(QtWidgets.QMessageBox.Information)
# msg.setText("Successfully converted model and saved to\n"+out_path)
# msg.setWindowTitle("Successfully converted model!")
# msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
# msg.exec_()
# self.pushButton_convertModel.setEnabled(False)
#TODO
def test_nnet(self):
#I need a function which calls a cpp app that uses the nnet and applies
#it on a random image.
#The same image is also used as input the the original .model and
#both results are then compared
print("Not implemented yet")
print("Placeholder")
print("Building site")
def actionDocumentation_function(self):
icon = QtGui.QImage(os.path.join(dir_root,"art",Default_dict["Icon theme"],"main_icon_simple_04_256"+icon_suff))
icon = QtGui.QPixmap(icon).scaledToHeight(32, QtCore.Qt.SmoothTransformation)
msg = QtWidgets.QMessageBox()
msg.setIconPixmap(icon)
text = "Currently, there is no detailed written documentation. AIDeveloper instead makes strong use of tooltips."
text = "<html><head/><body><p>"+text+"</p></body></html>"
msg.setText(text)
msg.setWindowTitle("Documentation")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
def actionSoftware_function(self):
if sys.platform == "win32":
plat = "win"
elif sys.platform=="darwin":
plat = "mac"
elif sys.platform=="linux":
plat = "linux"
else:
print("Unknown Operating system")
plat = "Win"
dir_deps = os.path.join(dir_root,"aid_dependencies_"+plat+".txt")#dir to aid_dependencies
f = open(dir_deps, "r")
text_modules = f.read()
f.close()
icon = QtGui.QImage(os.path.join(dir_root,"art",Default_dict["Icon theme"],"main_icon_simple_04_256"+icon_suff))
icon = QtGui.QPixmap(icon).scaledToHeight(32, QtCore.Qt.SmoothTransformation)
msg = QtWidgets.QMessageBox()
msg.setIconPixmap(icon)
text = "<html><head/><body><p>AIDeveloper "+str(VERSION)+"<br>"+sys.version+"<br>Click 'Show Details' to retrieve a list of all Python packages used."+"<br>AID_GPU uses CUDA (NVIDIA) to facilitate GPU processing</p></body></html>"
msg.setText(text)
msg.setDetailedText(text_modules)
msg.setWindowTitle("Software")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
def actionAbout_function(self):
icon = QtGui.QImage(os.path.join(dir_root,"art",Default_dict["Icon theme"],"main_icon_simple_04_256"+icon_suff))
icon = QtGui.QPixmap(icon).scaledToHeight(32, QtCore.Qt.SmoothTransformation)
msg = QtWidgets.QMessageBox()
msg.setIconPixmap(icon)
text = "AIDeveloper is written and maintained by <NAME>. Use <EMAIL> to contact the main developer if you find bugs or if you wish a particular feature. Icon theme 2 was mainly designed and created by <NAME>."
text = "<html><head/><body><p>"+text+"</p></body></html>"
msg.setText(text)
msg.setWindowTitle("About")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
def actionLoadSession_function(self):
#This function should allow to select and load a metafile and
#put the GUI into the corresponding state (place the stuff in the table, click Train/Valid)
filename = QtWidgets.QFileDialog.getOpenFileName(self, 'Open meta-data', Default_dict["Path of last model"],"AIDeveloper Meta or session file (*meta.xlsx *session.xlsx)")
filename = filename[0]
if len(filename)==0:
return
xlsx = pd.ExcelFile(filename)
UsedData = pd.read_excel(xlsx,sheet_name="UsedData")
Files = list(UsedData["rtdc_path"])
file_exists = [os.path.exists(url) for url in Files]
ind_true = np.where(np.array(file_exists)==True)[0]
UsedData_true = UsedData.iloc[ind_true]
Files_true = list(UsedData_true["rtdc_path"]) #select the indices that are valid
#Add stuff to table_dragdrop
rowPosition = int(self.table_dragdrop.rowCount())
self.dataDropped(Files_true)
#update the index, train/valid checkbox and shuffle checkbox
for i in range(len(Files_true)):
#set the index (celltype)
try:
index = int(np.array(UsedData_true["class"])[i])
except:
index = int(np.array(UsedData_true["index"])[i])
print("You are using an old version of AIDeveloper. Consider upgrading")
self.table_dragdrop.cellWidget(rowPosition+i, 1).setValue(index)
#is it checked for train or valid?
trorvalid = str(np.array(UsedData_true["TrainOrValid"])[i])
if trorvalid=="Train":
self.table_dragdrop.item(rowPosition+i, 2).setCheckState(QtCore.Qt.Checked)
elif trorvalid=="Valid":
self.table_dragdrop.item(rowPosition+i, 3).setCheckState(QtCore.Qt.Checked)
#how many cells/epoch during training or validation?
try:
nr_events_epoch = str(np.array(UsedData_true["nr_events_epoch"])[i])
except:
nr_events_epoch = str(np.array(UsedData_true["nr_cells_epoch"])[i])
self.table_dragdrop.item(rowPosition+i, 6).setText(nr_events_epoch)
#Shuffle or not?
shuffle = bool(np.array(UsedData_true["shuffle"])[i])
if shuffle==False:
self.table_dragdrop.item(rowPosition+i, 8).setCheckState(QtCore.Qt.Unchecked)
#Set Cells/Epoch to not editable
item = self.table_dragdrop.item(rowPosition+i, 6)
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
#item.setFlags(item.flags() |QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable )
else:
self.table_dragdrop.item(rowPosition+i, 8).setCheckState(QtCore.Qt.Checked)
#zoom_factor = float(np.array(UsedData_true["zoom_factor"])[i])
zoom_factor = str(np.array(UsedData_true["zoom_factor"])[i])
self.table_dragdrop.item(rowPosition+i, 9).setText(zoom_factor)
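#Column layout assumed by this loader (derived from the code above):
#col 0 = file path widget, col 1 = class/index spinbox, col 2 = 'Train' checkbox,
#col 3 = 'Valid' checkbox, col 6 = events per epoch, col 8 = shuffle checkbox,
#col 9 = zoom factor.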
#Now take care of missing data
#Take care of missing files (they might have been moved to a different location)
ind_false = np.where(np.array(file_exists)==False)[0]
#Files_false = list(UsedData_false["rtdc_path"]) #select the indices that are valid
if len(ind_false)>0:
UsedData_false = UsedData.iloc[ind_false]
Files_false = list(UsedData_false["rtdc_path"]) #select the indices that are valid
self.dataDropped(Files_false)
self.user_selected_path = None
#Create popup that informs user that there is missing data and let him specify a location
#to search for the missing files
def add_missing_files():
filename = QtWidgets.QFileDialog.getExistingDirectory(self, 'Select directory', Default_dict["Path of last model"])
user_selected_path = filename
if len(filename)==0:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Invalid directory")
msg.setWindowTitle("Invalid directory")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#get the hashes
hashes = list(np.array(UsedData_false["hash"])[ind_false])
paths = list(np.array(UsedData_false["rtdc_path"])[ind_false])
paths_new,info = aid_bin.find_files(user_selected_path,paths,hashes)
text = ('\n'.join([str(a) +"\t"+ b for a,b in zip(paths_new,info)]))
self.textBrowser_Info_pop2.setText(text)
#Add stuff to table_dragdrop
rowPosition = int(self.table_dragdrop.rowCount())
self.dataDropped(paths_new)
for i in range(len(paths_new)):
#set the index (celltype)
try:
index = int(np.array(UsedData_false["class"])[i])
except:
index = int(np.array(UsedData_false["index"])[i])
print("You are using an old version of AIDeveloper. Consider upgrading")
self.table_dragdrop.cellWidget(rowPosition+i, 1).setValue(index)
#is it checked for train or valid?
trorvalid = str(np.array(UsedData_false["TrainOrValid"])[i])
if trorvalid=="Train":
self.table_dragdrop.item(rowPosition+i, 2).setCheckState(QtCore.Qt.Checked)
elif trorvalid=="Valid":
self.table_dragdrop.item(rowPosition+i, 3).setCheckState(QtCore.Qt.Checked)
#how many cells/epoch during training or validation?
try:
nr_events_epoch = str(np.array(UsedData_false["nr_events_epoch"])[i])
except:
nr_events_epoch = str(np.array(UsedData_false["nr_cells_epoch"])[i])
print("You are using an old version of AIDeveloper. Consider upgrading")
self.table_dragdrop.item(rowPosition+i, 6).setText(nr_events_epoch)
#Shuffle or not?
shuffle = bool(np.array(UsedData_false["shuffle"])[i])
if shuffle==False:
self.table_dragdrop.item(rowPosition+i, 8).setCheckState(QtCore.Qt.Unchecked)
#Set Cells/Epoch to not editable
item = self.table_dragdrop.item(rowPosition+i, 6)
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
#item.setFlags(item.flags() |QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable )
else:
self.table_dragdrop.item(rowPosition+i, 8).setCheckState(QtCore.Qt.Checked)
#zoom_factor = float(np.array(UsedData_false["zoom_factor"])[i])
zoom_factor = str(np.array(UsedData_false["zoom_factor"])[i])
self.table_dragdrop.item(rowPosition+i, 9).setText(zoom_factor)
self.w_pop2 = MyPopup()
self.gridLayout_w_pop2 = QtWidgets.QGridLayout(self.w_pop2)
self.gridLayout_w_pop2.setObjectName("gridLayout_w_pop2")
self.verticalLayout_w_pop2 = QtWidgets.QVBoxLayout()
self.verticalLayout_w_pop2.setObjectName("verticalLayout_w_pop2")
self.horizontalLayout_w_pop2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_w_pop2.setObjectName("horizontalLayout_w_pop2")
self.pushButton_Close_pop2 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_Close_pop2.setObjectName("pushButton_Close_pop2")
self.pushButton_Close_pop2.clicked.connect(self.w_pop2.close)
self.horizontalLayout_w_pop2.addWidget(self.pushButton_Close_pop2)
self.pushButton_Search_pop2 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_Search_pop2.clicked.connect(add_missing_files)
self.pushButton_Search_pop2.setObjectName("pushButton_Search")
self.horizontalLayout_w_pop2.addWidget(self.pushButton_Search_pop2)
self.verticalLayout_w_pop2.addLayout(self.horizontalLayout_w_pop2)
self.textBrowser_Info_pop2 = QtWidgets.QTextBrowser(self.centralwidget)
self.textBrowser_Info_pop2.setObjectName("textBrowser_Info_pop2")
self.verticalLayout_w_pop2.addWidget(self.textBrowser_Info_pop2)
self.gridLayout_w_pop2.addLayout(self.verticalLayout_w_pop2, 0, 0, 1, 1)
self.w_pop2.setWindowTitle("There are missing files. Do you want to search for them?")
self.pushButton_Close_pop2.setText("No")
self.pushButton_Search_pop2.setText("Define folder to search files")
self.w_pop2.show()
#Ask user if only data, or the full set of parameters should be loaded
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Question)
msg.setText(tooltips["msg_loadSession"])
msg.setWindowTitle("Load only data table all parameters?")
msg.setStandardButtons(QtGui.QMessageBox.Yes | QtGui.QMessageBox.Save)# | QtGui.QMessageBox.Cancel)
dataonly = msg.button(QtGui.QMessageBox.Yes)
dataonly.setText('Data table only')
allparams = msg.button(QtGui.QMessageBox.Save)
allparams.setText('Data and all parameters')
# cancel = msg.button(QtGui.QMessageBox.Cancel)
# cancel.setText('Cancel')
msg.exec_()
#Only update the data table.
if msg.clickedButton()==dataonly: #show image and heatmap overlay
pass
#Load the parameters
elif msg.clickedButton()==allparams: #show image and heatmap overlay
Parameters = pd.read_excel(xlsx,sheet_name="Parameters")
aid_frontend.load_hyper_params(self,Parameters)
# if msg.clickedButton()==cancel: #show image and heatmap overlay
# return
#If all this run without error, save the path.
Default_dict["Path of last model"] = os.path.split(filename)[0]
aid_bin.save_aid_settings(Default_dict)
#Update the overview-box
if self.groupBox_DataOverview.isChecked()==True:
self.dataOverviewOn()
def actionSaveSession_function(self):
filename = QtWidgets.QFileDialog.getSaveFileName(self, 'Save session', Default_dict["Path of last model"],"AIDeveloper Session file (*_session.xlsx)")
filename = filename[0]
path, fname = os.path.split(filename)
if len(fname)==0:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("No valid filename was chosen.")
msg.setWindowTitle("No valid filename was chosen")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
if fname.endswith(".xlsx"):
fname = fname.split(".xlsx")[0]
if fname.endswith("_session"):
fname = fname.split("_session")[0]
if fname.endswith("_meta"):
fname = fname.split("_meta")[0]
if fname.endswith(".model"):
fname = fname.split(".model")[0]
if fname.endswith(".arch"):
fname = fname.split(".arch")[0]
#add the suffix _session.xlsx
if not fname.endswith("_session.xlsx"):
fname = fname +"_session.xlsx"
filename = os.path.join(path,fname)
writer = pd.ExcelWriter(filename, engine='openpyxl')
#Used files go to a separate sheet on the -session.xlsx
SelectedFiles = self.items_clicked()
SelectedFiles_df = pd.DataFrame(SelectedFiles)
pd.DataFrame().to_excel(writer,sheet_name='UsedData') #initialize empty Sheet
SelectedFiles_df.to_excel(writer,sheet_name='UsedData')
DataOverview_df = self.get_dataOverview()
DataOverview_df.to_excel(writer,sheet_name='DataOverview') #write data overview to separate sheet
#Get all hyper parameters
Para_dict = pd.DataFrame()
Para_dict["AIDeveloper_Version"]=VERSION,
Para_dict["model_zoo_version"]=model_zoo_version,
try:
Para_dict["OS"]=platform.platform(),
Para_dict["CPU"]=platform.processor(),
except:
Para_dict["OS"]="Unknown",
Para_dict["CPU"]="Unknown",
Para_dict = aid_frontend.get_hyper_params(Para_dict,self)
Para_dict.to_excel(writer,sheet_name='Parameters')
writer.save()
writer.close()
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Successfully saved as "+filename)
msg.setWindowTitle("Successfully saved")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
#If all that run without issue, remember the path for next time
Default_dict["Path of last model"] = os.path.split(filename)[0]
aid_bin.save_aid_settings(Default_dict)
def actionClearList_function(self):
#Remove all items from dragdrop table
while (self.table_dragdrop.rowCount() > 0):
self.table_dragdrop.removeRow(0)
#reset ram
self.ram = dict()
#Remove all items from comboBox_chooseRtdcFile
self.comboBox_chooseRtdcFile.clear()
self.comboBox_selectData.clear()
if self.groupBox_DataOverview.isChecked()==True:
self.dataOverviewOn()
def actionRemoveSelected_function(self):
#Which rows are highlighted?
rows_selected = np.array([index.row() for index in self.table_dragdrop.selectedIndexes()])
for row in (rows_selected):
self.table_dragdrop.removeRow(row)
self.comboBox_chooseRtdcFile.removeItem(row)
self.comboBox_selectData.removeItem(row)
#if there are rows below this row, they will move up one step:
ind = np.where(np.array(rows_selected)>row)[0]
rows_selected[ind] -= 1
def actionSaveToPng_function(self):
#Which table items are selected?
rows_selected = np.array([index.row() for index in self.table_dragdrop.selectedIndexes()])
if len(rows_selected)==0:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Warning)
msg.setText("Please first select rows in the table!")
msg.setWindowTitle("No rows selected")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#Ask user to which folder the images should be written:
filename = QtWidgets.QFileDialog.getSaveFileName(self, 'Save to .png/.jpg', Default_dict["Path of last model"],"Image file format (*.png *.jpg *.bmp *.eps *.gif *.ico *.icns)")
filename = filename[0]
if len(filename)==0:
return
filename_X, file_extension = os.path.splitext(filename)#divide into path and file_extension if possible
#Check if the chosen file_extension is valid
if not file_extension in [".png",".jpg",".bmp",".eps",".gif",".ico",".icns"]:
print("Invalid file extension detected. Will use .png instead.")
file_extension = ".png"
#Check the chosen export-options
if bool(self.actionExport_Original.isChecked())==True:
print("Export original images")
save_cropped = False
elif bool(self.actionExport_Cropped.isChecked())==True:
print("Export cropped images")
save_cropped = True
elif bool(self.actionExport_Off.isChecked())==True:
print("Exporting is turned off")
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Plase choose a different Export-option in ->Options->Export")
msg.setWindowTitle("Export is turned off!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
if save_cropped==True:
#Collect information for image processing
cropsize = self.spinBox_imagecrop.value()
color_mode = str(self.comboBox_loadedRGBorGray.currentText())
#zoom_methods = [self.actionOrder0.isChecked(),self.actionOrder1.isChecked(),self.actionOrder2.isChecked(),self.actionOrder3.isChecked(),self.actionOrder4.isChecked(),self.actionOrder5.isChecked()]
#zoom_order = np.where(np.array(zoom_methods)==True)[0]
zoom_order = int(self.comboBox_zoomOrder.currentIndex()) #the combobox-index is already the zoom order
index = 0
for row in (rows_selected):
#get the corresponding rtdc_path
rtdc_path = str(self.table_dragdrop.cellWidget(row, 0).text())
nr_events = None #no number needed as we take all images (replace=False in gen_crop_img)
zoom_factor = float(self.table_dragdrop.item(row, 9).text())
gen = aid_img.gen_crop_img(cropsize,rtdc_path,nr_events=nr_events,replace=False,random_images=False,zoom_factor=zoom_factor,zoom_order=zoom_order,color_mode=color_mode,padding_mode='constant')
images = next(gen)[0]
#Save the images data to .png/.jpeg...
for img in images:
img = PIL.Image.fromarray(img)
img.save(filename_X+"_"+str(index)+file_extension)
index+=1
if save_cropped==False:#save the original images without pre-processing
index = 0
for row in (rows_selected):
rtdc_path = str(self.table_dragdrop.cellWidget(row, 0).text())
failed,rtdc_ds = aid_bin.load_rtdc(rtdc_path)
if failed:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(rtdc_ds))
msg.setWindowTitle("Error occurred during loading file")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
images = rtdc_ds["events"]["image"] #get the images
#Save the images data to .png/.jpeg...
for img in images:
img = PIL.Image.fromarray(img)
img.save(filename_X+"_"+str(index)+file_extension)
index+=1
#If all that run without issue, remember the path for next time
Default_dict["Path of last model"] = os.path.split(filename)[0]
aid_bin.save_aid_settings(Default_dict)
def actionRemoveSelectedPeaks_function(self):
#Which rows are highlighted?
rows_selected = np.array([index.row() for index in self.tableWidget_showSelectedPeaks.selectedIndexes()])
#delete each row only once :)
rows_selected = np.unique(rows_selected)
for row in (rows_selected):
self.tableWidget_showSelectedPeaks.removeRow(row)
#if there are rows below this row, they will move up one step:
ind = np.where(np.array(rows_selected)>row)[0]
rows_selected[ind] -=1
#Update the widget_showSelectedPeaks
self.update_peak_plot()
def actionRemoveAllPeaks_function(self):
#Remove all items from tableWidget_showSelectedPeaks
while (self.tableWidget_showSelectedPeaks.rowCount() > 0):
self.tableWidget_showSelectedPeaks.removeRow(0)
def actionDataToRamNow_function(self):
self.statusbar.showMessage("Moving data to RAM")
#check that the nr. of classes are equal to the model out put
SelectedFiles = self.items_clicked()
color_mode = self.get_color_mode()
zoom_factors = [selectedfile["zoom_factor"] for selectedfile in SelectedFiles]
#zoom_order = [self.actionOrder0.isChecked(),self.actionOrder1.isChecked(),self.actionOrder2.isChecked(),self.actionOrder3.isChecked(),self.actionOrder4.isChecked(),self.actionOrder5.isChecked()]
#zoom_order = int(np.where(np.array(zoom_order)==True)[0])
zoom_order = int(self.comboBox_zoomOrder.currentIndex()) #the combobox-index is already the zoom order
#Get the user-defined cropping size
crop = int(self.spinBox_imagecrop.value())
#Make the cropsize a bit larger since the images will later be rotated
cropsize2 = np.sqrt(crop**2+crop**2)
cropsize2 = np.ceil(cropsize2 / 2.) * 2 #round to the next even number
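#Example: crop=32 -> sqrt(32^2+32^2) ~= 45.25 -> ceil(45.25/2)*2 = 46 (next even number).
#Note: cropsize2 is computed here but crop_imgs_to_ram below is called with 'crop';
#presumably the enlargement needed for rotation is handled inside aid_img (assumption).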
dic = aid_img.crop_imgs_to_ram(list(SelectedFiles),crop,zoom_factors=zoom_factors,zoom_order=zoom_order,color_mode=color_mode)
self.ram = dic
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Successfully moved data to RAM")
msg.setWindowTitle("Moved Data to RAM")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
self.statusbar.showMessage("")
###########################################################################
###########################################################################
###########################################################################
###########################################################################
#######################Functions for Assess model tab######################
def assessmodel_tab_load_model(self):
#Get the requested model-name from the chosen metafile
#Open a QFileDialog
filename = QtWidgets.QFileDialog.getOpenFileName(self, 'Select a trained model you want to assess', Default_dict["Path of last model"],"Keras Model file (*.model)")
filename = filename[0]
if os.path.isfile(filename):
#Put this path on the Assess Model tab
self.lineEdit_LoadModel_2.setText(filename)
#Save the path to a variable that is then used by history_tab_convertModel_nnet_worker
self.load_model_path = filename
#Get the modelindex
path,filename = os.path.split(filename)
modelindex = filename.split(".model")[0]
modelindex = int(modelindex.split("_")[-1])
#Update the modelindex on the Assess Model tab
self.spinBox_ModelIndex_2.setValue(int(modelindex))
model_full_h5 = h5py.File(self.load_model_path, 'r')
model_config = model_full_h5.attrs['model_config']
model_full_h5.close() #close the hdf5
model_config = json.loads(str(model_config)[2:-1])
try: #Sequential Model
in_dim = model_config['config'][0]['config']['batch_input_shape']
except: #Functional Api
in_dim = model_config['config']["layers"][0]["config"]["batch_input_shape"]
try: #Sequential Model
out_dim = model_config['config'][-2]['config']['units']
except: #Functional Api
out_dim = model_config['config']["layers"][-2]["config"]["units"]
self.spinBox_Crop_2.setValue(int(in_dim[-2]))
self.spinBox_OutClasses_2.setValue(int(out_dim))
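#Example (assumed shapes): a batch_input_shape of [None, 32, 32, 1] yields
#in_dim[-2]=32 (crop size) and in_dim[-1]=1 (grayscale), while out_dim is read from
#the units of the second-to-last layer, i.e. the final Dense before the output
#Activation (assumption about the model architectures produced by AIDeveloper).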
print("input dimension:"+str(in_dim))
#Adjust the Color mode in the UI:
channels = in_dim[-1] #TensorFlow: channels in last dimension
if channels==1:
#Set the combobox on Assess model tab to Grayscale; just info for user
index = self.comboBox_loadedRGBorGray.findText("Grayscale", QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_loadedRGBorGray.setCurrentIndex(index)
#Check the currently set color_mode. This is important since images are loaded accordingly
if self.get_color_mode()!="Grayscale":
#when model needs Grayscale, set the color mode in comboBox_GrayOrRGB to that
index = self.comboBox_GrayOrRGB.findText("Grayscale", QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_GrayOrRGB.setCurrentIndex(index)
self.statusbar.showMessage("Color Mode set to Grayscale",5000)
elif channels==3:
#Set the combobox on Assess model tab to Grayscale; just info for user
index = self.comboBox_loadedRGBorGray.findText("RGB", QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_loadedRGBorGray.setCurrentIndex(index)
#Check the currently set color_mode. This is important since images are loaded accordingly
if self.get_color_mode()!="RGB":
#when model needs RGB, set the color mode in comboBox_GrayOrRGB to that
index = self.comboBox_GrayOrRGB.findText("RGB", QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_GrayOrRGB.setCurrentIndex(index)
self.statusbar.showMessage("Color Mode set to RGB",5000)
else:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Channel dimensions of model ("+str(channels)+" channels) is not supported. Only 1 or 3 channels are allowed.")
msg.setWindowTitle("Unsupported channel dimension")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
modelindex = int(self.spinBox_ModelIndex_2.value())
path,fname = os.path.split(self.load_model_path)
fname = fname.split(str(modelindex)+".model")[0]+"meta.xlsx"
metafile_path = os.path.join(path,fname)
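#Example: a load_model_path ending in "MLP_24.model" with modelindex 24 gives
#fname "MLP_meta.xlsx" (split on "24.model"), which is looked up next to the model.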
try:
img_processing_settings = aid_img.load_model_meta(metafile_path)
self.img_processing_settings = img_processing_settings
model_type = str(img_processing_settings["model_type"].values[0])
normalization_method = str(img_processing_settings["normalization_method"].values[0])
index = self.comboBox_Normalization_2.findText(normalization_method, QtCore.Qt.MatchFixedString)
if index >= 0:
self.comboBox_Normalization_2.setCurrentIndex(index)
else:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Unkown normalization method found in .meta file")
msg.setWindowTitle("Unkown normalization method")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
self.lineEdit_ModelSelection_2.setText(model_type)
except: #there is no such file, or the file cannot be opened
#Ask the user to choose the normalization method
self.lineEdit_ModelSelection_2.setText("Unknown")
self.comboBox_Normalization_2.setEnabled(True)
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Meta file not found/ Could not be read. Please specify the normalization method manually (dropdown menu)")
msg.setWindowTitle("No .meta available")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
#If all that run without issue, remember the path for next time
Default_dict["Path of last model"] = os.path.split(self.load_model_path)[0]
aid_bin.save_aid_settings(Default_dict)
else:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("File not found!:\nProbably the .model was deleted or not saved")
msg.setWindowTitle("File not found")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
def inference_time_worker(self,progress_callback,history_callback):
if self.radioButton_cpu.isChecked():
deviceSelected = str(self.comboBox_cpu.currentText())
elif self.radioButton_gpu.isChecked():
deviceSelected = str(self.comboBox_gpu.currentText())
gpu_memory = float(self.doubleSpinBox_memory.value())
#Create config (define which device to use)
config_gpu = aid_dl.get_config(cpu_nr,gpu_nr,deviceSelected,gpu_memory)
#Retrieve more Multi-GPU Options from Menubar:
cpu_merge = bool(self.actioncpu_merge.isEnabled())
cpu_relocation = bool(self.actioncpu_relocation.isEnabled())
cpu_weight_merge = bool(self.actioncpu_weightmerge.isEnabled())
#Initiate a fresh session
with tf.Session(graph = tf.Graph(), config=config_gpu) as sess:
if deviceSelected=="Multi-GPU" and cpu_weight_merge==True:
with tf.device("/cpu:0"):
model_keras = load_model(self.load_model_path,custom_objects=aid_dl.get_custom_metrics())
else:
model_keras = load_model(self.load_model_path,custom_objects=aid_dl.get_custom_metrics())
#Multi-GPU
if deviceSelected=="Multi-GPU":
print("Adjusting the model for Multi-GPU")
model_keras = multi_gpu_model(model_keras, gpus=gpu_nr, cpu_merge=cpu_merge, cpu_relocation=cpu_relocation)#indicate the numbers of gpus that you have
#Get the model input dimensions
in_dim = np.array(model_keras.get_input_shape_at(0))
ind = np.where(in_dim==None)
in_dim[ind] = 1
nr_imgs = self.spinBox_inftime_nr_images.value()
nr_imgs = int(np.round(float(nr_imgs)/10.0))
#Warm up by predicting a single image
image = (np.random.randint(0,255,size=in_dim)).astype(np.float32)/255.0
model_keras.predict(image) # warm up
Times = []
for k in range(10):
image = (np.random.randint(0,255,size=in_dim)).astype(np.float32)/255.0
t1 = time.time()
for i in range(nr_imgs):#time nr_imgs single-image predictions per run
model_keras.predict(image)
t2 = time.time()
dt = (t2-t1)/(nr_imgs) #divide by nr_imgs to get time [s] per image
dt = dt*1000.0 #multiply by 1000 to change to ms range
dic = {"outp":str(round(dt,3))+"ms"}
history_callback.emit(dic)
Times.append(dt)
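#Example: a spinBox value of 500 gives nr_imgs=50, so each of the 10 runs times 50
#single-image predictions; dt is reported in ms per image (mean/median/min/max below).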
#Send out the Times
text = " [ms] Mean: "+str(round(np.mean(Times),3))+"; "+"Median: "+str(round(np.median(Times),3))+"; "+"Min: "+str(round(np.min(Times),3))+"; "+"Max: "+str(round(np.max(Times),3))
dic = {"outp":text}
history_callback.emit(dic)
progress_callback.emit(1) #when finished return one
self.threadpool_single_queue = 0 #reset the thread-counter
def inference_time(self):
if self.radioButton_cpu.isChecked():
deviceSelected = str(self.comboBox_cpu.currentText())
elif self.radioButton_gpu.isChecked():
deviceSelected = str(self.comboBox_gpu.currentText())
#Inform user that certain config is used for inference
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Will use "+deviceSelected+" for inference. To change bewtween CPU and GPU, use the options on the Build-Tab")
msg.setWindowTitle("CPU used for inference")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
#Take the model path from the GUI
self.load_model_path = str(self.lineEdit_LoadModel_2.text())
if len(self.load_model_path)==0:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Please define a model path first")
msg.setWindowTitle("No model path found")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#Increase the thread-counter by one; only after finishing the thread, it will be reset to 0
self.threadpool_single_queue += 1
if self.threadpool_single_queue == 1:
worker = Worker(self.inference_time_worker)
def get_dt_from_worker(dic):
outp = dic["outp"]
self.lineEdit_InferenceTime.setText(outp)
worker.signals.history.connect(get_dt_from_worker)
self.threadpool_single.start(worker)
def update_check_worker(self,progress_callback,history_callback):
#Retrieve information from GitHub
dic = aid_bin.check_for_updates(VERSION)
#dic = {"Errors":None,"latest_release":latest_release,"latest_release_url":url,"changelog":changelog}
history_callback.emit(dic)
progress_callback.emit(1) #when finished return one
self.threadpool_single_queue = 0 #reset the thread-counter
def actionUpdate_check_function(self):
#Increase the thread-counter by one; only after finishing the thread, it will be reset to 0
self.threadpool_single_queue += 1
if self.threadpool_single_queue == 1:
worker = Worker(self.update_check_worker)
def get_info_from_worker(dic):
#Create a popup window
self.popup_updates = MyPopup()
self.popup_updates_ui = aid_frontend.Ui_Updates()
self.popup_updates_ui.setupUi(self.popup_updates) #open a popup
if dic["Errors"]!=None:#if there is an errror (no internet...)
#display the error in the textbrowser
text = str(dic["Errors"])
elif dic["Errors"]==None:#No errors! Nice
latest_release = dic["latest_release"]
if latest_release=="You are up to date":
text = "Your major version of AIDeveloper is up-to-date. Check below if there are updates available for that major version. <br>Example: Your major version of AIDeveloper is 0.2.0, then all updates which start with 0.2.x will be compatible."
text = "<html><head/><body><p>"+text+"</p></body></html>"
else:
text = "There is a new major update available. To download, follow this link:"
text = text+"<br>"+"<a href ="+dic["latest_release_url"]+">"+dic["latest_release_url"]+"</a>"
text = text+"<br>"+dic["changelog"]
text = text+"<br>Major updates need to be downloaded and installed manually. After that, you can install minor updates (which correspond to that major version) using the menu below."
text = "<html><head/><body><p>"+text+"</p></body></html>"
#Fill info text (on top of Update Popup window)
self.popup_updates_ui.textBrowser_majorVersionInfo.setText(text)
#Fill lineEdit "Your version"
self.popup_updates_ui.lineEdit_yourVersion.setText(VERSION)
#Add updates to the comboBox
self.popup_updates_ui.comboBox_updatesOndevice.addItems(dic["tags_update_ondevice"])
self.popup_updates_ui.comboBox_updatesOnline.addItems(dic["tags_update_online"])
self.popup_updates.show()
self.popup_updates_ui.pushButton_installOndevice.clicked.connect(lambda: self.update_aideveloper("local"))
self.popup_updates_ui.pushButton_installOnline.clicked.connect(lambda: self.update_aideveloper("github"))
self.popup_updates_ui.pushButton_findFile.clicked.connect(self.update_addLocalFile)
worker.signals.history.connect(get_info_from_worker)
self.threadpool_single.start(worker)
def actionTerminology_function(self):
#show a messagebox with link to terminology github page
icon = QtGui.QImage(os.path.join(dir_root,"art",Default_dict["Icon theme"],"main_icon_simple_04_256"+icon_suff))
icon = QtGui.QPixmap(icon).scaledToHeight(32, QtCore.Qt.SmoothTransformation)
msg = QtWidgets.QMessageBox()
msg.setIconPixmap(icon)
text = "To learn more about machine learning/ deep learning specific terminology, please visit:<br>"
url = "<a href=https://github.com/maikherbig/AIDeveloper/tree/master/Terminology>https://github.com/maikherbig/AIDeveloper/tree/master/Terminology</a>"
text = "<html><head/><body><p>"+text+url+"</p></body></html>"
msg.setText(text)
msg.setWindowTitle("ML/DL Terminology")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
def update_aideveloper(self,source):
#retrieve the current text on comboBox_availableUpdates
if source=="local":
item_text = str(self.popup_updates_ui.comboBox_updatesOndevice.currentText())
elif source=="github":
item_text = str(self.popup_updates_ui.comboBox_updatesOnline.currentText())
#Length of the version name should not be 0
if len(item_text)==0:
e = "No update available"
aid_frontend.message(e)
return
if source=="local":
#Complete file path (item_text not enough)
item_path = "AIDeveloper_"+item_text+".zip"
item_path = os.path.join(dir_root,item_path)
elif source=="github":
if item_text=="Bleeding edge":
#user want the most recent scripts from GitHub.
downloadprocess = aid_bin.download_aid_repo()
else:
#item_text is a tag of the version. Use tag to download the zip
downloadprocess = aid_bin.download_aid_update(item_text)
#Check if download was successful
if downloadprocess["success"]==False:#if the download was not done show message
message = "Download was not conducted. Probably, the file is already present in:/n"+downloadprocess["path_save"]
aid_frontend.message(message,msg_type="Warning")
return
#Retrieve the path of the zip file (contains the update files)
item_path = downloadprocess["path_save"]
if not os.path.isfile(item_path):#in case there is no file (zip file not created...)
e = "Update requires a zip file. Could not find/create such a file!"
aid_frontend.message(e)
return
#Perform the update (including backup of current version)
path_backup = aid_bin.update_from_zip(item_path,VERSION)
#message: Installation successful-> need to restart AID
msg = "Update successful. Please restart AIDeveloper. A backup of your previous version is stored in:\n"+path_backup
aid_frontend.message(msg,msg_type="Information")
def update_addLocalFile(self):
#open a filedialog
filename = QtWidgets.QFileDialog.getOpenFileName(self, 'Choose update file', dir_root,"AID update file (*.zip)")
filename = filename[0]
print(filename)
#Check if the file is a zip
if not filename.endswith(".zip"):#file has to be .zip
text = "Chosen file is not a .zip archive!"
aid_frontend.message(msg_text=text,msg_type="Error")
#Check that file exists
if not os.path.isfile(filename):
text = "File not found"
aid_frontend.message(msg_text=text,msg_type="Error")
return
base,_ = os.path.split(filename)
#ensure that filename obeys the name convention: "AIDeveloper_"+tag_name+".zip"
tag_name = datetime.datetime.now().strftime("%Y%m%d_%H-%M-%S")+"-update"
save_name = "AIDeveloper_"+tag_name+".zip"
save_name = os.path.join(dir_root,save_name)
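#Example: selecting a file on 2021-05-03 14:07:30 yields tag_name
#"20210503_14-07-30-update" and save_name "<dir_root>/AIDeveloper_20210503_14-07-30-update.zip".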
#copy the file to dir_root
shutil.copy(filename,save_name)
#append tag_name to combobox
self.popup_updates_ui.comboBox_updatesOndevice.addItem(tag_name)
text = "Update is now availabele via the Dropdown menu on the left ("+tag_name+")."
text += " The file was copied to:\n"
text += save_name
aid_frontend.message(msg_text=text,msg_type="Information")
def get_validation_data_from_clicked(self,get_normalized=True):
#Check, if files were clicked
SelectedFiles = self.items_clicked_no_rtdc_ds()
######################Load the Validation Data################################
ind = [selectedfile["TrainOrValid"] == "Valid" for selectedfile in SelectedFiles]
ind = np.where(np.array(ind)==True)[0]
if len(ind)==0:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("No validation data was selected. Please use tab 'Build' and drag/drop to load data")
msg.setWindowTitle("No validation data selected")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return 0
SelectedFiles_valid = np.array(SelectedFiles)[ind]
SelectedFiles_valid = list(SelectedFiles_valid)
indices_valid = [selectedfile["class"] for selectedfile in SelectedFiles_valid]
nr_events_epoch_valid = [selectedfile["nr_events_epoch"] for selectedfile in SelectedFiles_valid]
rtdc_path_valid = [selectedfile["rtdc_path"] for selectedfile in SelectedFiles_valid]
zoom_factors_valid = [selectedfile["zoom_factor"] for selectedfile in SelectedFiles_valid]
#zoom_order = [self.actionOrder0.isChecked(),self.actionOrder1.isChecked(),self.actionOrder2.isChecked(),self.actionOrder3.isChecked(),self.actionOrder4.isChecked(),self.actionOrder5.isChecked()]
#zoom_order = int(np.where(np.array(zoom_order)==True)[0])
zoom_order = int(self.comboBox_zoomOrder.currentIndex()) #the combobox-index is already the zoom order
shuffle_valid = [selectedfile["shuffle"] for selectedfile in SelectedFiles_valid]
xtra_in = set([selectedfile["xtra_in"] for selectedfile in SelectedFiles])
if len(xtra_in)>1:# False and True is present. Not supported
print("Xtra data is used only for some files. Xtra data needs to be used either by all or by none!")
return
xtra_in = list(xtra_in)[0]#this is either True or False
#Read other model properties from the Ui
norm = self.comboBox_Normalization_2.currentText()
norm = str(norm)
#if normalization method needs mean/std of training set, the metafile needs to be loaded:
if norm == "StdScaling using mean and std of all training data":
modelindex = int(self.spinBox_ModelIndex_2.value())
path,fname = os.path.split(self.load_model_path)
fname = fname.split(str(modelindex)+".model")[0]+"meta.xlsx"
metafile_path = os.path.join(path,fname)
parameters = pd.read_excel(metafile_path,sheet_name='Parameters')
mean_trainingdata = parameters["Mean of training data used for scaling"]
std_trainingdata = parameters["Std of training data used for scaling"]
else:
mean_trainingdata = None
std_trainingdata = None
crop = int(self.spinBox_Crop_2.value())
paddingMode = str(self.comboBox_paddingMode.currentText())#.lower()
#read self.ram to new variable ; DONT clear ram after since multiple assessments can run on the same data.
DATA = self.ram
#self.ram = dict() #DONT clear the ram here!
############Cropping#####################
X_valid,y_valid,Indices,Xtra_in = [],[],[],[]
for i in range(len(SelectedFiles_valid)):
if not self.actionDataToRam.isChecked():
#Replace=True means that individual cells could occur several times
gen_valid = aid_img.gen_crop_img(crop,rtdc_path_valid[i],nr_events_epoch_valid[i],random_images=shuffle_valid[i],replace=True,zoom_factor=zoom_factors_valid[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode,xtra_in=xtra_in)
else: #get a similar generator, using the ram-data
if len(DATA)==0:
#Replace=True means that individual cells could occur several times
gen_valid = aid_img.gen_crop_img(crop,rtdc_path_valid[i],nr_events_epoch_valid[i],random_images=shuffle_valid[i],replace=True,zoom_factor=zoom_factors_valid[i],zoom_order=zoom_order,color_mode=self.get_color_mode(),padding_mode=paddingMode,xtra_in=xtra_in)
else:
if self.actionVerbose.isChecked():
print("Loaded data from RAM")
gen_valid = aid_img.gen_crop_img_ram(DATA,rtdc_path_valid[i],nr_events_epoch_valid[i],random_images=shuffle_valid[i],replace=True,xtra_in=xtra_in) #Replace=True means that individual cells could occur several times
gen = next(gen_valid)
X_valid.append(gen[0])
y_valid.append(np.repeat(indices_valid[i],X_valid[-1].shape[0]))
Indices.append(gen[1]) #Cell index to track the event in the data-set(not cell-type!)
Xtra_in.append(gen[2])
X_valid_orig = [X.astype(np.uint8) for X in X_valid]
X_valid = np.concatenate(X_valid)
Xtra_in = np.concatenate(Xtra_in)
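#At this point (per the loop above): X_valid_orig keeps one uint8 array per file,
#while X_valid and Xtra_in are concatenated over all validation files; y_valid (below)
#repeats each file's class index once per event.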
# dim = X_valid.shape
# if dim[2]!=crop:
# remove = int(dim[2]/2.0 - crop/2.0)
# #X_batch = X_batch[:,:,remove:-remove,remove:-remove] #crop to crop x crop pixels #Theano
# X_valid = X_valid[:,remove:-remove,remove:-remove] #crop to crop x crop pixels #TensorFlow
print("X_valid has following dimension:")
print(X_valid.shape)
y_valid = np.concatenate(y_valid)
if len(np.array(X_valid).shape)<3:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Discarded all events because too far at border of image (check zooming/cropping settings!)")
msg.setWindowTitle("Empty dataset!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return 0
if get_normalized == True:
if norm == "StdScaling using mean and std of all training data":
X_valid = aid_img.image_normalization(X_valid,norm,mean_trainingdata,std_trainingdata)
else:
X_valid = aid_img.image_normalization(X_valid,norm)
else:
X_valid = None
dic = {"SelectedFiles_valid":SelectedFiles_valid,"nr_events_epoch_valid":nr_events_epoch_valid,"rtdc_path_valid":rtdc_path_valid,"X_valid_orig":X_valid_orig,"X_valid":X_valid,"y_valid":y_valid,"Indices":Indices,"Xtra_in":Xtra_in}
self.ValidationSet = dic
return 1
def export_valid_to_rtdc(self):
if not type(self.ValidationSet) is type(None): #If ValidationSet is not none, there has been a ValidationSet loaded already
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Re-used validation data (from RAM) loaded earlier. If that is not good, please check and uncheck a file on 'Build' tab. This will delete the validation data from RAM")
msg.setWindowTitle("Re-Used data")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
worked = 1
else: #Otherwise get the validation data from the stuff that is clicked on 'Build'-Tab
worked = self.get_validation_data_from_clicked() #after that, self.ValidationSet will exist
if worked==0:
return
rtdc_path_valid = self.ValidationSet["rtdc_path_valid"]
X_valid = []
X_valid.append(self.ValidationSet["X_valid"][:,:,:,0])
X_valid_orig = self.ValidationSet["X_valid_orig"]
Xtra_in = self.ValidationSet["Xtra_in"]
Indices = self.ValidationSet["Indices"]
y_valid = self.ValidationSet["y_valid"]
#Get a filename from the user for the new file
filename = QtWidgets.QFileDialog.getSaveFileName(self, 'Save to rtdc', Default_dict["Path of last model"],"rtdc file (*.rtdc)")
filename = filename[0]
if len(filename)==0:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("No valid filename was chosen.")
msg.setWindowTitle("No valid filename was chosen")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#add the suffix _Valid_Data.rtdc or _Valid_Labels.txt
if not filename.endswith(".rtdc"):
filename = filename +".rtdc"
filename_X = filename.split(".rtdc")[0]+"_Valid_Data.rtdc"
filename_y = filename.split(".rtdc")[0]+"_Valid_Labels.txt"
if bool(self.actionExport_Original.isChecked())==True:
print("Export original images")
save_cropped = False
elif bool(self.actionExport_Cropped.isChecked())==True:
print("Export cropped images")
save_cropped = True
elif bool(self.actionExport_Off.isChecked())==True:
print("Exporting is turned off")
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("You could choose a different Exporting option in ->Option->Export")
msg.setWindowTitle("Export is turned off!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
aid_bin.write_rtdc(filename_X,rtdc_path_valid,X_valid_orig,Indices,cropped=save_cropped,color_mode=self.get_color_mode(),xtra_in=Xtra_in)
np.savetxt(filename_y,y_valid.astype(int),fmt='%i')
#If all that run without issue, remember the path for next time
Default_dict["Path of last model"] = os.path.split(filename)[0]
aid_bin.save_aid_settings(Default_dict)
def import_valid_from_rtdc(self):
filename = QtWidgets.QFileDialog.getOpenFileName(self, 'Open Valid_Data.rtdc', Default_dict["Path of last model"],".rtdc file (*_Valid_Data.rtdc)")
filename = filename[0]
rtdc_path = filename
if len(filename)==0:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("No valid filename was chosen.")
msg.setWindowTitle("No valid filename was chosen")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#Load the corresponding labels
filename_labels = filename.split("Data.rtdc")[0]+"Labels.txt"
if not os.path.isfile(filename_labels):
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("No corresponding _Labels.npy file found! Expected it here: "+filename_labels)
msg.setWindowTitle("No Labels found")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
y_valid = np.loadtxt(filename_labels).astype(int)
#Inform user (statusbar message)
self.statusbar.showMessage("Loaded labels from "+filename_labels,5000)
#Read images from .rtdc file
failed,rtdc_ds = aid_bin.load_rtdc(rtdc_path)
if failed:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(rtdc_ds))
msg.setWindowTitle("Error occurred during loading file")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#Load meta file
#filename_meta = filename.split("Valid_Data.rtdc")[0]+"meta.xlsx"
#Make the Image dimensions matching the requirements of the model
model_in = int(self.spinBox_Crop_2.value())
model_out = int(self.spinBox_OutClasses_2.value())
color_mode = str(self.comboBox_loadedRGBorGray.currentText())
# if color_mode=='RGB': #User wants RGB images
# target_channels = 3
# if color_mode=='Grayscale': # User want to have Grayscale
# target_channels = 1
if model_in==1 and model_out==1:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Please first define a model. The validation data will then be cropped according to the required model-input size")
msg.setWindowTitle("No model defined")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
x_valid = np.array(rtdc_ds["events"]["image"])
#dim = x_valid.shape[1]
#channels = x_valid.shape[-1]
#Get further image processing settings from self.
zoom_factor = float(self.img_processing_settings["zoom_factor"].values[0])
zoom_interpol_method = str(self.img_processing_settings["zoom_interpol_method"].values[0])
padding_mode = str(self.img_processing_settings["padding_mode"].values[0])
#normalization_method = str(self.img_processing_settings["normalization_method"].values[0])
norm = self.comboBox_Normalization_2.currentText()
norm = str(norm)
mean_trainingdata = self.img_processing_settings["mean_trainingdata"].values[0]
std_trainingdata = self.img_processing_settings["std_trainingdata"].values[0]
gen_valid = aid_img.gen_crop_img(cropsize=model_in,rtdc_path=rtdc_path,random_images=False,zoom_factor=zoom_factor,zoom_order=zoom_interpol_method,color_mode=color_mode,padding_mode=padding_mode,xtra_in=False)
x_valid,index,xtra_valid = next(gen_valid)
#When an object is too close to the image border, its frame is dropped.
#Adjust y_valid accordingly
y_valid = y_valid[index]
if not model_in==x_valid.shape[-2]:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Model input dimension ("+str(model_in)+"x"+str(model_in)+"pix) and validation data dimension ("+str(x_valid.shape)+") do not match")
msg.setWindowTitle("Wrong image dimension")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
#Normalize the images
X_valid_orig = np.copy(x_valid) #copy the cropped but non-normalized images
if norm == "StdScaling using mean and std of all training data":
X_valid = aid_img.image_normalization(x_valid,norm,mean_trainingdata,std_trainingdata)
else:
X_valid = aid_img.image_normalization(x_valid,norm)
Indices = np.array(range(X_valid.shape[0])) #those are just indices to identify single cells in the file ->not cell-type indices!
SelectedFiles_valid = None #[].append(rtdc_path)#
nr_events_epoch_valid = None
rtdc_h5 = h5py.File(rtdc_path, 'r')
try:
Xtra_in = np.array(rtdc_h5["xtra_in"])[index]
except:
Xtra_in = []
rtdc_h5.close() #close the hdf5
dic = {"SelectedFiles_valid":SelectedFiles_valid,"nr_events_epoch_valid":nr_events_epoch_valid,"rtdc_path_valid":[rtdc_path],"X_valid_orig":[X_valid_orig],"X_valid":X_valid,"y_valid":y_valid,"Indices":[Indices],"Xtra_in":Xtra_in}
self.ValidationSet = dic
self.statusbar.showMessage("Validation data loaded to RAM",5000)
#Update the table
#Prepare a table in tableWidget_Info
self.tableWidget_Info_2.setColumnCount(0)#Reset table
self.tableWidget_Info_2.setRowCount(0)#Reset table
self.tableWidget_Info_2.setColumnCount(4) #Four columns
nr_ind = len(set(y_valid)) #number of different labels ("indices")
nr_rows = nr_ind
self.tableWidget_Info_2.setRowCount(nr_rows)
#Which selected file has the most features?
header_labels = ["Class","Nr of cells","Clr","Name"]
self.tableWidget_Info_2.setHorizontalHeaderLabels(header_labels)
header = self.tableWidget_Info_2.horizontalHeader()
for i in range(4):
header.setResizeMode(i, QtWidgets.QHeaderView.ResizeToContents)
rowPosition = 0
#Total nr of cells for each index
for index in np.unique(y_valid):
#put the index in column nr. 0
item = QtWidgets.QTableWidgetItem()
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
item.setData(QtCore.Qt.EditRole,str(index))
self.tableWidget_Info_2.setItem(rowPosition, 0, item)
#Get the validation files of that index
ind = np.where(y_valid==index)[0]
nr_events_epoch = len(ind)
item = QtWidgets.QTableWidgetItem()
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
#item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
item.setData(QtCore.Qt.EditRole, str(np.sum(nr_events_epoch)))
self.tableWidget_Info_2.setItem(rowPosition, 1, item)
#Column for color
item = QtWidgets.QTableWidgetItem()
item.setFlags(item.flags() &~QtCore.Qt.ItemIsEnabled &~ QtCore.Qt.ItemIsSelectable )
#item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
item.setData(QtCore.Qt.EditRole, "")
item.setBackground(QtGui.QColor(self.colorsQt[index]))
self.tableWidget_Info_2.setItem(rowPosition, 2, item)
#Column for User specified name
item = QtWidgets.QTableWidgetItem()
#item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
item.setData(QtCore.Qt.EditRole,str(index))
self.tableWidget_Info_2.setItem(rowPosition, 3, item)
rowPosition += 1
self.tableWidget_Info_2.resizeColumnsToContents()
self.tableWidget_Info_2.resizeRowsToContents()
def cm_interaction(self,item):
"""
Grab validation data of particular class, load the scores (model.predict)
and save images to .rtdc, or show them (users decision)
first, "assess_model_plotting" has the be carried out
"""
true_label = item.row()
predicted_label = item.column()
#If there is X_valid and y_valid on RAM, use it!
if not type(self.ValidationSet) is type(None): #If X_valid is not none, there has been X_valid loaded already
self.statusbar.showMessage("Re-used validation data (from RAM) loaded earlier. If that is not good, please check and uncheck a file on 'Build' tab. This will delete the validation data from RAM",2000)
else: #Otherwise get the validation data from the stuff that is clicked on 'Build'-Tab
self.get_validation_data_from_clicked() #after that, self.ValidationSet will exist
self.statusbar.showMessage("Loaded data corresponding to the clicked files on 'Build'-tab",2000)
rtdc_path_valid = self.ValidationSet["rtdc_path_valid"]
X_valid_orig = self.ValidationSet["X_valid_orig"] #cropped but non-normalized images
Indices = self.ValidationSet["Indices"]
y_valid = self.ValidationSet["y_valid"] #load the validation labels to a new variable
dic = self.Metrics #gives {"scores":scores,"pred":pred,"conc_target_cell":conc_target_cell,"enrichment":enrichment,"yield_":yield_}
if len(dic)==0:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Data was altered. Please run 'Update Plots' again")
msg.setWindowTitle("Data has changed")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
pred = dic["pred"]
#get the length of each Index-list,
lengths = [len(l) for l in Indices]
starts = np.cumsum(lengths)
ToSave, y_valid_list, Indices_ = [],[],[] #list; store images remaining to indiv. .rtdc set in there
starts = np.array([0]+list(starts))
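#Example: lengths=[3,5] gives starts=[0,3,8], so file 0 owns slice [0:3] and
#file 1 owns slice [3:8] of the concatenated y_valid/pred arrays.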
for i in range(len(lengths)):
y_val = y_valid[starts[i]:starts[i+1]]
pred_ = pred[starts[i]:starts[i+1]]
#update the indx to prepare for next iteration
#indx = lengths[i]
ind = np.where( (y_val==true_label) & (pred_==predicted_label) )[0] #select true_label cells and which of them are classified as predicted_label
#Grab the corresponding images
ToSave.append(X_valid_orig[i][ind,:,:]) #get non-normalized X_valid to new variable
#X_valid_.append(X_valid[i][ind,:,:]) #get normalized/cropped images ready to run through the model
y_valid_list.append(y_val[ind])
Indices_.append(Indices[i][ind]) #keep the original cell-indices of the selected events
total_number_of_chosen_cells = [len(a) for a in y_valid_list]
total_number_of_chosen_cells = np.sum(np.array(total_number_of_chosen_cells))
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Question)
text = "<html><head/><body><p>Show images/heatmap or save to .rtdc/.png/.jpg?</p></body></html>"
msg.setText(text)
msg.setWindowTitle("Show or save?")
msg.setStandardButtons(QtGui.QMessageBox.Yes | QtGui.QMessageBox.Save | QtGui.QMessageBox.Cancel)
show = msg.button(QtGui.QMessageBox.Yes)
show.setText('Show image/heatmap')
# show = msg.button(QtGui.QMessageBox.YesToAll)
# show.setText('Show image/heatmap')
save_png = msg.button(QtGui.QMessageBox.Save)
save_png.setText('Save to .rtdc/.png/.jpg...')
cancel = msg.button(QtGui.QMessageBox.Cancel)
cancel.setText('Cancel')
msg.exec_()
#View image and heatmap overlay (Grad-CAM)
if msg.clickedButton()==show: #show image and heatmap overlay
if total_number_of_chosen_cells==0:
return
#Get the images that were passed through the model for prediction
X_valid = self.ValidationSet["X_valid"] #cropped and normalized images (model input)
ind = np.where( (y_valid==true_label) & (pred==predicted_label) )[0] #select true_label cells and check which of them are classified as predicted_label
X_valid_ = X_valid[ind]
#Popup window to show images and settings
self.popup_gradcam = QtGui.QDialog()
self.popup_gradcam_ui = aid_frontend.popup_cm_interaction()
self.popup_gradcam_ui.setupUi(self.popup_gradcam) #open a popup to show images and options
#self.popup_imgRes.setWindowModality(QtCore.Qt.WindowModal)
#self.popup_gradcam.setWindowModality(QtCore.Qt.ApplicationModal)
#Fill Model info
self.popup_gradcam_ui.lineEdit_loadModel.setText(self.load_model_path)
in_dim = int(self.spinBox_Crop_2.value()) #grab value from Assess Tab
self.popup_gradcam_ui.spinBox_Crop_inpImgSize.setValue(in_dim)#insert value into popup
out_dim = int(self.spinBox_OutClasses_2.value()) #grab value from Assess Tab
self.popup_gradcam_ui.spinBox_outpSize.setValue(out_dim) #insert value into popup
self.popup_gradcam_ui.spinBox_gradCAM_targetClass.setMaximum(out_dim-1)
#For the grad_cam the name of the final conv-layer needs to be selected
convlayers = [layer.name for layer in self.model_keras.layers if len(layer.output_shape)==4]
convlayers = convlayers[::-1] #reverse list
self.popup_gradcam_ui.comboBox_gradCAM_targetLayer.addItems(convlayers)
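#layers with a 4D output (batch,height,width,channels) carry spatial feature maps; reversing the list puts the deepest conv-layer (the typical Grad-CAM choice) first in the dropdown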
#Connect buttons to functions
self.popup_gradcam_ui.pushButton_update.clicked.connect(lambda: self.popup_cm_show_update(ToSave,X_valid_))
self.popup_gradcam_ui.pushButton_reset.clicked.connect(self.popup_cm_reset)
self.popup_gradcam_ui.pushButton_showSummary.clicked.connect(self.popup_show_model_summary)
self.popup_gradcam_ui.pushButton_toTensorB.clicked.connect(self.popup_to_tensorboard)
#Get the original image
img_display = np.concatenate(ToSave)
img_display = np.r_[img_display]
img_display = img_display.swapaxes(1,2)
img_display = np.append(img_display,img_display[-1:],axis=0)
self.popup_gradcam_ui.widget_image.setImage(img_display)
self.popup_gradcam.show()
#For .rtdc/.png... saving
elif msg.clickedButton()==save_png: #Save to .rtdc/.png/.jpg/...
if total_number_of_chosen_cells==0:
return
sumlen = np.sum(np.array([len(l) for l in ToSave]))
self.statusbar.showMessage("Nr. of target cells above threshold = "+str(sumlen),2000)
filename = QtWidgets.QFileDialog.getSaveFileName(self, 'Save to .rtdc/.png/.jpg', Default_dict["Path of last model"],"File format (*.rtdc *.png *.jpg *.bmp *.eps *.gif *.ico *.icns)")
filename = filename[0]
if len(filename)==0:
return
filename_X, file_extension = os.path.splitext(filename)#divide into path and file_extension if possible
#Check if chosen file_extension is valid
if not file_extension in [".rtdc",".png",".jpg",".bmp",".eps",".gif",".ico",".icns"]:
print("Invalid file extension detected. Will use .png instead.")
file_extension = ".png"
if file_extension==".rtdc":#user wants to save to .rtdc
#add the suffix _Valid_Data.rtdc or _Valid_Labels.txt
if not filename.endswith(".rtdc"):
filename = filename +".rtdc"
filename_X = filename.split(".rtdc")[0]+"_Valid_Data.rtdc"
filename_y = filename.split(".rtdc")[0]+"_Valid_Labels.txt"
#Save the labels
y_valid_list = np.concatenate(y_valid_list)
#Save the .rtdc data (images and all other stuff)
#Should cropped or original be saved?
if bool(self.actionExport_Original.isChecked())==True:
print("Export original images")
save_cropped = False
if bool(self.actionExport_Cropped.isChecked())==True:
print("Export cropped images")
save_cropped = True
elif bool(self.actionExport_Off.isChecked())==True:
print("Exporting is turned off")
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("You may want to choose a different exporting option in ->Options->Export")
msg.setWindowTitle("Export is turned off!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
np.savetxt(filename_y,y_valid_list.astype(int),fmt='%i')
aid_bin.write_rtdc(filename_X,rtdc_path_valid,ToSave,Indices_,cropped=save_cropped,color_mode=self.get_color_mode())
#If all that run without issue, remember the path for next time
Default_dict["Path of last model"] = os.path.split(filename)[0]
aid_bin.save_aid_settings(Default_dict)
else: #some image file format was chosen
#Should cropped or original be saved?
if bool(self.actionExport_Original.isChecked())==True:
print("Export original images")
save_cropped = False
if bool(self.actionExport_Cropped.isChecked())==True:
print("Export cropped images")
save_cropped = True
elif bool(self.actionExport_Off.isChecked())==True:
print("Exporting is turned off")
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("You may want to choose a different exporting option in ->Options->Export")
msg.setWindowTitle("Export is turned off!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#Save the images data to .png/.jpeg...
index = 0
for imgs in ToSave:
for img in imgs:
img = PIL.Image.fromarray(img)
img.save(filename_X+"_"+str(index)+file_extension)
index+=1
#If all that run without issue, remember the path for next time
Default_dict["Path of last model"] = os.path.split(filename)[0]
aid_bin.save_aid_settings(Default_dict)
def popup_cm_show_update(self,ToSave,X_valid_):
#ui_item = self.popup_gradcam_ui
#grab information from the popup window
show_image = bool(self.popup_gradcam_ui.groupBox_image_Settings.isChecked())
show_gradCAM = bool(self.popup_gradcam_ui.groupBox_gradCAM_Settings.isChecked())
alpha_1 = float(self.popup_gradcam_ui.doubleSpinBox_image_alpha.value())
alpha_2 = float(self.popup_gradcam_ui.doubleSpinBox_gradCAM_alpha.value())
layer_name = str(self.popup_gradcam_ui.comboBox_gradCAM_targetLayer.currentText()) #self.model_keras exists after assess_model_plotting was carried out
class_ = int(self.popup_gradcam_ui.spinBox_gradCAM_targetClass.value())
colormap = str(self.popup_gradcam_ui.comboBox_gradCAM_colorMap.currentText()) #self.model_keras exists after assess_model_plotting was carried out
colormap = "COLORMAP_"+colormap
colormap = getattr(cv2, colormap)
currentindex = self.popup_gradcam_ui.widget_image.currentIndex
if show_image and not show_gradCAM:
#Get the original image for display
img_display = np.concatenate(ToSave)
img_display = np.r_[img_display]
img_display = img_display.swapaxes(1,2)
img_display = np.append(img_display,img_display[-1:],axis=0)
if show_gradCAM:#grad-Cam is on
img_display = np.concatenate(ToSave)
#compare model input dim and dim of the provided data
in_model = self.model_keras.input.shape.as_list()[1:]
in_data = list(X_valid_.shape[1:])
channels_model = in_model[-1]
if not in_data==in_model:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg = "Model input dimension ("+str(in_model)+") not equal to dim. of input data ("+str(in_data)+")"
msg.setText(msg)
msg.setWindowTitle("Input dimension error")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
img2 = aid_dl.grad_cam(self.load_model_path, X_valid_, class_, layer_name)#Carry out grad-cam
img2 = [cv2.applyColorMap(cam_, colormap) for cam_ in img2]#create colormap returns BGR image!
img2 = [cv2.cvtColor(cam_, cv2.COLOR_BGR2RGB) for cam_ in img2]#convert to RGB
#in case img_display is grayscale, mimic an rgb image by stacking
if channels_model==1:
print("Triple stacking grayscale channel")
img_display = [np.stack((img_display_,)*3, axis=-1) for img_display_ in img_display]
#add heatmap to image, make sure alpha_1=0 if show_image=False
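#cv2.addWeighted blends per pixel: alpha_1*image + alpha_2*heatmap + 0 (gamma); both inputs must have the same shape and dtype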
img_display = [cv2.addWeighted(img_display[i], alpha_1, img2[i], alpha_2, 0) for i in range(X_valid_.shape[0])]
#ToDo: this only works for RGB images. Adjust expression to work for grayscale and RGB
img_display = np.r_[img_display]
img_display = img_display.swapaxes(1,2)
img_display = np.append(img_display,img_display[-1:],axis=0)
self.popup_gradcam_ui.widget_image.setImage(img_display)
self.popup_gradcam_ui.widget_image.setCurrentIndex(currentindex)
self.popup_gradcam.show()
def popup_cm_reset(self):
self.popup_gradcam_ui.groupBox_image_Settings.setChecked(True)
self.popup_gradcam_ui.groupBox_gradCAM_Settings.setChecked(False)
#self.popup_gradcam_ui.doubleSpinBox_image_alpha.setValue(1)
self.popup_gradcam_ui.comboBox_gradCAM_targetLayer.setCurrentIndex(0)
#self.popup_gradcam_ui.comboBox_gradCAM_colorMap.setCurrentIndex(0)
self.popup_gradcam_ui.spinBox_gradCAM_targetClass.setValue(0)
def popup_show_model_summary(self):
#textbrowser popup
self.popup_modelsummary = MyPopup()
self.popup_modelsummary_ui = aid_frontend.popup_cm_modelsummary()
self.popup_modelsummary_ui.setupUi(self.popup_modelsummary) #open a popup to show images and options
text5 = "Model summary:\n"
summary = []
self.model_keras.summary(print_fn=summary.append)
summary = "\n".join(summary)
text = text5+summary
self.popup_modelsummary_ui.textBrowser_modelsummary.append(text)
self.popup_modelsummary.show()
def popup_to_tensorboard(self):
#Open the model in tensorboard
#Issue: I cannot stop the process. The approach below, which uses a
#separate thread for the function, does not solve the issue
self.threadpool_single_queue += 1
if self.threadpool_single_queue == 1:
worker = Worker(self.tensorboad_worker)
def get_pid_from_worker(dic):
pid = dic["outp"]
#print("WORKER-PID")
#print("pid")
#os.kill(pid,signal.CTRL_C_EVENT)
#ToDo Find a way to kill that process!
worker.signals.history.connect(get_pid_from_worker)
self.threadpool_single.start(worker)
#print("PID-Here:")
#print(os.getpid())
#time.sleep(2)
def tensorboad_worker(self,progress_callback,history_callback):
#send the model to tensorboard (webbased application)
with tf.Session() as sess:
model_keras = load_model(self.load_model_path,custom_objects=aid_dl.get_custom_metrics())
graph = K.get_session().graph # Get the session's graph
#get a folder for that model in temp
temp_path = aid_bin.create_temp_folder()
modelname = os.path.split(self.load_model_path)[-1]
modelname = modelname.split(".model")[0]
log_dir = os.path.join(temp_path,modelname)
writer = tf.summary.FileWriter(logdir=log_dir, graph=graph)#write a log
#tb = program.TensorBoard()
tb = program.TensorBoard(default.get_plugins(), default.get_assets_zip_provider())
#tb.configure(argv=[None, '--logdir', log_dir,"--host","127.0.0.1"])
tb.configure(argv=[None, '--logdir', log_dir,"--host","localhost"])
url = tb.launch()
url = os.path.join(url)
os.system(r"start "+url)
pid = os.getpid()
dic = {"outp":pid}
#print("WORKER1-PID")
#print(pid)
history_callback.emit(dic) #return the pid (use it to kill the process)
self.threadpool_single_queue = 0 #reset the thread-counter
time.sleep(0.5)
def copy_cm_to_clipboard(self,cm1_or_cm2):
if cm1_or_cm2==1:
table = self.tableWidget_CM1
cols = table.columnCount()
header = [table.horizontalHeaderItem(col).text() for col in range(cols)]
elif cm1_or_cm2==2:
table = self.tableWidget_CM2
cols = table.columnCount()
header = [table.horizontalHeaderItem(col).text() for col in range(cols)]
elif cm1_or_cm2==3: #this is for the classification report table tableWidget_AccPrecSpec
table = self.tableWidget_AccPrecSpec
cols = table.columnCount()
header = list(range(cols))
rows = table.rowCount()
tmp_df = pd.DataFrame(columns=header,index=range(rows))
for i in range(rows):
for j in range(cols):
try:
tmp_df.iloc[i, j] = table.item(i, j).text()
except:
tmp_df.iloc[i, j] = np.nan
tmp_df.to_clipboard()
if cm1_or_cm2<3:
self.statusbar.showMessage("Confusion matrix copied to clipboard.",2000)
if cm1_or_cm2==3:
self.statusbar.showMessage("Classification report copied to clipboard.",2000)
def assess_model_plotting(self):
if self.load_model_path == None:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Please define a model path first")
msg.setWindowTitle("No model path found")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#If there is a ValidationSet on RAM-> use it!
if not type(self.ValidationSet) is type(None): #If ValidationSet is not none, there has been ValidationSet loaded already
self.statusbar.showMessage("Use validation data (from RAM) loaded earlier. If that is not good, please check and uncheck a file on 'Build' tab. This will delete the validation data from RAM",5000)
else: #Otherwise get the validation data from the stuff that is clicked on 'Build'-Tab
self.get_validation_data_from_clicked() #after that, self.ValidationSet will exist
#Check if input data is available
if type(self.ValidationSet)==type(None):
return
elif type(self.ValidationSet["X_valid"])==type(None):
return
#Check the input dimensions:
img_dim = self.ValidationSet["X_valid"].shape[-2]
model_in = int(self.spinBox_Crop_2.value())
if model_in!=img_dim:
self.ValidationSet = None
self.get_validation_data_from_clicked() #after that, self.ValidationSet will exist
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("New model has different input dimensions (image crop). Validation set is re-loaded (like when you clicked on files on build-tab)")
msg.setWindowTitle("Automatically re-loaded validation set")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
y_valid = self.ValidationSet["y_valid"] #load the validation labels to a new variable
#X_valid = self.X_valid #<--dont do this since it is used only once (.predict) and it would require additional RAM; instead use self.X_valid for .predict
#Load the model and predict
with tf.Session() as sess:
model_keras = load_model(self.load_model_path,custom_objects=aid_dl.get_custom_metrics())
self.model_keras = model_keras #useful to get the list of layers for Grad-CAM; also used to show the summary
in_dim = model_keras.get_input_shape_at(node_index=0)
if type(in_dim)==list:
multi_input = True
in_dim = in_dim[0]#discard the second (xtra input)
else:
multi_input = False
channels_model = in_dim[-1]
channels_data = self.ValidationSet["X_valid"].shape[-1]
#Compare channel dimensions of loaded model and validation set
if channels_model==3 and channels_data==1:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
text = "Model expects 3 channels, but data has 1 channel!"
text = text+" Will stack available channel three times to generate RGB image."
msg.setText(text)
msg.setWindowTitle("Automatic adjustment of image channels")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
#model wants rgb images, but provided data is grayscale->copy and stack 3 times
self.ValidationSet["X_valid"] = np.stack((self.ValidationSet["X_valid"][:,:,:,0],)*3, axis=-1)
elif channels_model==1 and channels_data==3:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
text = "Model expects 1 channel, but data has 3 channels!"
text = text+" Will use the luminosity formula to convert RGB to grayscale."
msg.setText(text)
msg.setWindowTitle("Automatic adjustment of image channels")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
#model wants grayscale, but provided data is rgb
self.ValidationSet["X_valid"] = aid_img.rgb_2_gray(self.ValidationSet["X_valid"])
elif channels_model!=channels_data: #Model and validation data have different channel dims
text = "Model expects "+str(int(channels_model))+" channel(s), but data has "+str(int(channels_data))+" channel(s)!"
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText(text)
msg.setWindowTitle("Model and data channel dimension not equal!")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
if multi_input == False:
scores = model_keras.predict(self.ValidationSet["X_valid"])
if multi_input == True:
print("self.ValidationSet[Xtra_in]")
print(self.ValidationSet["Xtra_in"])
scores = model_keras.predict([self.ValidationSet["X_valid"],self.ValidationSet["Xtra_in"]])
#Get settings from the GUI
threshold = float(self.doubleSpinBox_sortingThresh.value())#threshold probability above which a cell is sorted
target_index = int(self.spinBox_indexOfInterest.value())#index of the cell type that should be sorted for
thresh_on = bool(self.checkBox_SortingThresh.isChecked())
#Check that the target index alias "Sorting class" is actually a valid class of the model
out_dim = int(self.spinBox_OutClasses_2.value())
if not target_index<out_dim:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Warning)
msg.setText("You set the 'Sorting class' to "+str(target_index)+" which is not a valid class of the loaded model. The model only has the following classes: "+str(range(out_dim)))
msg.setWindowTitle("Class not available in the model")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
dic = aid_bin.metrics_using_threshold(scores,y_valid,threshold,target_index,thresh_on) #returns dic = {"scores":scores,"pred":pred,"conc_target_cell":conc_target_cell,"enrichment":enrichment,"yield_":yield_}
self.Metrics = dic #write to a variable #
pred = dic["pred"]
cm = metrics.confusion_matrix(y_valid,pred,labels=range(scores.shape[1]))
cm_normalized = 100*cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
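#each row is divided by the total number of true events of that class, e.g. counts [30,70] become [30.,70.] percent; classes without events yield nan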
#Show the metrics on tableWidget_CM1 and tableWidget_CM2
#inds_uni = set(list(set(y_valid))+list(set(pred))) #It could be that a cell-index is not present in the validation data, but, the dimension of the scores tells me, how many indices are supposed to appear
inds_uni = range(scores.shape[1]) #these indices are explained by model
#look in into tableWidget_Info_2 if there are user defined index names
rowCount = self.tableWidget_Info_2.rowCount()
#Only count rows with input
rowCount = sum([self.tableWidget_Info_2.item(row, 0)!=None for row in range(rowCount)])
try:
indices_on_table = [int(self.tableWidget_Info_2.item(row, 0).text()) for row in range(rowCount)]
names_on_table = [str(self.tableWidget_Info_2.item(row, 3).text()) for row in range(rowCount)]
except Exception as e:
#There was an error!
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(e))
msg.setWindowTitle("Error")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
#Check that len(names_on_table) <= len(inds_uni) ->it is impossible that the model for example can predict 2 classes, but there are 3 different classes in the validation set
if not len(names_on_table) <= len(inds_uni):
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Model can only predict "+str(len(inds_uni))+" classes, but validation data contains "+str(len(names_on_table))+" classes")
msg.setWindowTitle("Too many classes in validation set")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
#return
CellNames = []
for ind in inds_uni:
#check if that index is present on table
where = np.where(np.array(indices_on_table)==ind)[0]
if len(where)==1:#if there is exactly one item...
CellNames.append(str(np.array(names_on_table)[where][0])) #append the corresponding user defined name to a list
else:
CellNames.append(str(ind))
header_labels = [str(name) for name in CellNames]#user-defined names, or the class index as fallback
#Table for CM1 - Total Nr of cells
self.tableWidget_CM1.setRowCount(len(inds_uni))
self.tableWidget_CM1.setColumnCount(len(inds_uni))
self.tableWidget_CM1.setHorizontalHeaderLabels(header_labels)
self.tableWidget_CM1.setVerticalHeaderLabels(header_labels)
for i in inds_uni:
for j in inds_uni:
rowPosition = i
colPosition = j
#Number of cells of true class i that were predicted as class j
item = QtWidgets.QTableWidgetItem()
item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
item.setData(QtCore.Qt.EditRole,str(cm[i,j]))
self.tableWidget_CM1.setItem(rowPosition, colPosition, item)
self.tableWidget_CM1.resizeColumnsToContents()
self.tableWidget_CM1.resizeRowsToContents()
#Table for CM2 - Normalized Confusion matrix
self.tableWidget_CM2.setRowCount(len(inds_uni))
self.tableWidget_CM2.setColumnCount(len(inds_uni))
self.tableWidget_CM2.setHorizontalHeaderLabels(header_labels)
self.tableWidget_CM2.setVerticalHeaderLabels(header_labels)
for i in range(len(inds_uni)):
for j in range(len(inds_uni)):
rowPosition = i
colPosition = j
#Percentage of cells of true class i that were predicted as class j
item = QtWidgets.QTableWidgetItem()
item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
item.setData(QtCore.Qt.EditRole,str(cm_normalized[i,j]))
self.tableWidget_CM2.setItem(rowPosition, colPosition, item)
self.tableWidget_CM2.resizeColumnsToContents()
self.tableWidget_CM2.resizeRowsToContents()
############Fill tableWidget_AccPrecSpec with information##########
#Compute more metrics and put them on the table below
nr_target_init = float(len(np.where(y_valid==target_index)[0])) #number of target cells in the initial sample
conc_init = nr_target_init/float(len(y_valid)) #concentration of the target cells in the initial sample
acc = metrics.accuracy_score(y_valid,pred)
#Reset the table
self.tableWidget_AccPrecSpec.setColumnCount(0)#Reset table
self.tableWidget_AccPrecSpec.setRowCount(0)#Reset table
nr_cols = np.max([5,len(inds_uni)+1])
self.tableWidget_AccPrecSpec.setColumnCount(nr_cols) #at least five columns
self.tableWidget_AccPrecSpec.setRowCount(7+len(inds_uni)+2) #Nr. of rows
#Put lots and lots of Info on tableWidget_AccPrecSpec
text_conc_init = "Init. conc. of cells from class/name "+header_labels[target_index]
self.tableWidget_AccPrecSpec.setItem(0 , 0, QtGui.QTableWidgetItem(text_conc_init))
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(np.round(100*conc_init,4)))
self.tableWidget_AccPrecSpec.setItem(0, 1, item)
text_conc_final = "Final conc. in target region"
self.tableWidget_AccPrecSpec.setItem(1 , 0, QtGui.QTableWidgetItem(text_conc_final))
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(np.round(dic["conc_target_cell"],4)))
self.tableWidget_AccPrecSpec.setItem(1, 1, item)
text_enrich = "Enrichment"
self.tableWidget_AccPrecSpec.setItem(2 , 0, QtGui.QTableWidgetItem(text_enrich))
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(np.round(dic["enrichment"],4)))
self.tableWidget_AccPrecSpec.setItem(2, 1, item)
text_yield = "Yield"
self.tableWidget_AccPrecSpec.setItem(3 , 0, QtGui.QTableWidgetItem(text_yield))
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(np.round(dic["yield_"],4)))
self.tableWidget_AccPrecSpec.setItem(3, 1, item)
text_acc = "Accuracy"#+str(round(acc,4))+"\n"
self.tableWidget_AccPrecSpec.setItem(4 , 0, QtGui.QTableWidgetItem(text_acc))
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, float(np.round(acc,4)))
self.tableWidget_AccPrecSpec.setItem(4, 1, item)
text_classification_report = "Classification Report"#+metrics.classification_report(y_valid, pred, target_names=header_labels)
self.tableWidget_AccPrecSpec.setItem(5 , 0, QtGui.QTableWidgetItem(text_classification_report))
class_rep = metrics.classification_report(y_valid, pred,labels=inds_uni, target_names=header_labels,output_dict =True)
try:
df = pd.DataFrame(class_rep)
df = df.T
ax_left = df.axes[0]
for row in range(len(ax_left)):
self.tableWidget_AccPrecSpec.setItem(7+row, 0, QtGui.QTableWidgetItem(str(ax_left[row])))
ax_up = df.axes[1]
for col in range(len(ax_up)):
self.tableWidget_AccPrecSpec.setItem(6, 1+col, QtGui.QTableWidgetItem(str(ax_up[col])))
for row in range(df.shape[0]):
for col in range(df.shape[1]):
val = df.iloc[row,col]
val = float(np.round(val,4))
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, val)
self.tableWidget_AccPrecSpec.setItem(7+row, 1+col, item)
except Exception as e:
#There was an issue while building the classification report table!
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Warning)
msg.setText(str(e))
msg.setWindowTitle("Error")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
self.tableWidget_AccPrecSpec.resizeColumnsToContents()
self.tableWidget_AccPrecSpec.resizeRowsToContents()
#AFTER the table is resized to the contents, fill in also information
#about the used data:
rowPosition = self.tableWidget_AccPrecSpec.rowCount()
self.tableWidget_AccPrecSpec.insertRow(rowPosition) #Insert a new row
self.tableWidget_AccPrecSpec.setItem(rowPosition , 0, QtGui.QTableWidgetItem("Used Files"))
rowPosition = self.tableWidget_AccPrecSpec.rowCount()
self.tableWidget_AccPrecSpec.insertRow(rowPosition) #Insert another row!
self.tableWidget_AccPrecSpec.setItem(rowPosition , 0, QtGui.QTableWidgetItem("File"))
#dic = {"SelectedFiles_valid":SelectedFiles_valid,"nr_events_epoch_valid":nr_events_epoch_valid,"rtdc_path_valid":[rtdc_path],"X_valid_orig":[X_valid_orig],"X_valid":X_valid,"y_valid":y_valid,"Indices":[Indices]}
rtdc_path_valid = self.ValidationSet["rtdc_path_valid"]
#nr_events_epoch_valid = self.ValidationSet["nr_events_epoch_valid"]
y_valid = self.ValidationSet["y_valid"] #y_valid is a long array containing the label of all cell (of all clicked files)
Indices = self.ValidationSet["Indices"] #Index is a list with arrays containing cell-indices (to track events in a data-set)
y_valid_uni = np.unique(np.array(y_valid),return_counts=True)
#set the column count to at least match the number of different cell-types available
if self.tableWidget_AccPrecSpec.columnCount() < len(y_valid_uni[0]):
diff = len(y_valid_uni[0])-self.tableWidget_AccPrecSpec.columnCount()
for col_ind in range(diff):
colPosition = self.tableWidget_AccPrecSpec.columnCount()
self.tableWidget_AccPrecSpec.insertColumn(colPosition) #Insert a new col for each cell-type
#Create a column for each cell-type
for col_ind in range(len(y_valid_uni[0])):
#how_many = y_valid_uni[1][col_ind]
#self.tableWidget_AccPrecSpec.setItem(rowPosition , 1+col_ind, QtGui.QTableWidgetItem(float(how_many)))
content = "Class "+str(y_valid_uni[0][col_ind])
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, content)
self.tableWidget_AccPrecSpec.setItem(rowPosition , 1+col_ind, item)
loc = 0
for row in range(len(rtdc_path_valid)):
rowPosition = self.tableWidget_AccPrecSpec.rowCount()
self.tableWidget_AccPrecSpec.insertRow(rowPosition) #Insert a new row for each entry
self.tableWidget_AccPrecSpec.setItem(rowPosition , 0, QtGui.QTableWidgetItem(rtdc_path_valid[row]))
#y_valid_uni = np.unique(y_valid[row])
#self.tableWidget_AccPrecSpec.setItem(rowPosition , 1, QtGui.QTableWidgetItem(np.array(y_valid_uni)))
#self.tableWidget_AccPrecSpec.setItem(rowPosition , 2, QtGui.QTableWidgetItem(float(nr_events_epoch_valid[row])))
index = Indices[row] #get the array of indices of a single measurement
y_valid_i = y_valid[loc:loc+len(index)]
loc = loc+len(index)
y_valid_i_uni = np.unique(y_valid_i,return_counts=True)
for col_ind in range(len(y_valid_i_uni[0])):
#what is the cell-type
cell_type = int(y_valid_i_uni[0][col_ind])#cell-type index always starts at 0; Nr. of cells of cell-type 0 goes to column 1
how_many = y_valid_i_uni[1][col_ind]
item = QtWidgets.QTableWidgetItem()
item.setData(QtCore.Qt.EditRole, int(how_many))
self.tableWidget_AccPrecSpec.setItem(rowPosition , 1+cell_type, item)
#Draw the probability histogram
self.probability_histogram()
#Finally, also update the third plot
self.thirdplot()
def create_random_table(self):
print("def create_random_table only useful for development")
# matrix = np.random.randint(0,100,size=(3,3))
# self.tableWidget_CM1.setRowCount(matrix.shape[0])
# self.tableWidget_CM1.setColumnCount(matrix.shape[1])
#
# for i in range(matrix.shape[0]):
# for j in range(matrix.shape[1]):
# item = QtWidgets.QTableWidgetItem()
# item.setData(QtCore.Qt.EditRole,str(matrix[i,j]))
# self.tableWidget_CM1.setItem(i, j, item)
#
# self.tableWidget_CM1.resizeColumnsToContents()
# self.tableWidget_CM1.resizeRowsToContents()
def probability_histogram(self):
"""
Grab the scores of each class and show it in histogram
"""
if len(self.Metrics) ==0: #if there are no metrics yet, give a message and return
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("There are no Metrics determined yet (use ->'Update Plots' first)")
msg.setWindowTitle("No Metrics found")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
dic = self.Metrics #returns dic = {"scores":scores,"pred":pred,"conc_target_cell":conc_target_cell,"enrichment":enrichment,"yield_":yield_}
scores = dic["scores"]
#Get the available cell indices (cell-type identifier)
inds_uni = range(scores.shape[1]) #these indices are explained by model
threshold = float(self.doubleSpinBox_sortingThresh.value())#threshold probability above which a cell is sorted
target_index = int(self.spinBox_indexOfInterest.value())#index of the cell type that should be sorted for
try:
#What is the probability that a cell with y_valid=i belongs to class target_index?
scores_i = []
y_valid = self.ValidationSet["y_valid"]
for i in inds_uni:
ind = np.where(y_valid==i)[0]
if len(ind)>0: #if there are no cells available, dont append. In this case there will also be no color defined
scores_i.append(scores[ind,target_index])
except Exception as e:
#There was an issue while gathering the scores!
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(str(e))
msg.setWindowTitle("Error")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
rowCount = self.tableWidget_Info_2.rowCount()
#only count rows with content
rowCount = sum([self.tableWidget_Info_2.item(row, 0)!=None for row in range(rowCount)])
names_on_table = [str(self.tableWidget_Info_2.item(row, 3).text()) for row in range(rowCount)]
index_on_table = [int(self.tableWidget_Info_2.item(row, 0).text()) for row in range(rowCount)]
#On which row is the target_index?
ind = np.where(np.array(index_on_table)==target_index)[0]
if len(ind) == 1:
target_name = str(np.array(names_on_table)[ind][0])
else:
target_name = str(target_index)
#Get the user-defined colors from table
colors_on_table = [self.tableWidget_Info_2.item(row, 2).background() for row in range(rowCount)]
#it can be that the table was not updated and there are more scores than table-items
if len(colors_on_table)!=len(scores_i):
#update table
SelectedFiles = self.items_clicked_no_rtdc_ds()
self.update_data_overview(SelectedFiles)
self.update_data_overview_2(SelectedFiles)
#update colors on table
rowCount = self.tableWidget_Info_2.rowCount()
#only count rows with content
rowCount = sum([self.tableWidget_Info_2.item(row, 0)!=None for row in range(rowCount)])
colors_on_table = [self.tableWidget_Info_2.item(row, 2).background() for row in range(rowCount)]
#Clear the plot
self.widget_probHistPlot.clear()
#Add plot
hist = self.widget_probHistPlot.addPlot()
hist.showGrid(x=True,y=True)
hist.setLabel('bottom', "p("+target_name+")", units='')
hist.setLabel('left', "#", units='')
#Get the user defined histogram style from the combobox
style = str(self.comboBox_probability_histogram.currentText())
for i in range(len(scores_i)): #iterate only over classes actually present in the validation set; iterating over all model classes (inds_uni) would cause an error if a cell-type is missing
hist_i = hist.plot()
if len(scores_i[i])>1:#only continue if there are multiple events (histogram does not make sense otherwise)
range_hist = (scores_i[i].min(), scores_i[i].max())
first_edge, last_edge = np.lib.histograms._get_outer_edges(scores_i[i], range=range_hist)
try: #numpy 1.15
width = np.lib.histograms._hist_bin_selectors['auto'](scores_i[i])
except:#numpy >1.15 expects the data range as second argument
width = np.lib.histograms._hist_bin_selectors['auto'](scores_i[i],(np.min(scores_i[i]),np.max(scores_i[i])))
try:#prevent crash if width=0
n_equal_bins = int(np.ceil(np.lib.histograms._unsigned_subtract(last_edge, first_edge) / width))
except:
n_equal_bins = 1
if n_equal_bins>1E4: #Who needs more than 10k bins?!:
n_equal_bins = int(1E4)
else:
n_equal_bins='auto'
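#the block above mimics np.histogram(...,bins='auto') using private numpy helpers (whose signature changed between versions) while capping the bin count at 10000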
y,x = np.histogram(scores_i[i], bins=n_equal_bins)
if style=="Style1":
pencolor = pg.mkColor(colors_on_table[i].color())
pen = pg.mkPen(color=pencolor,width=5)
hist_i.setData(x, y, stepMode=True, pen=pen,clear=False)
elif style=="Style2":
pencolor = pg.mkColor(colors_on_table[i].color())
pen = pg.mkPen(color=pencolor,width=10)
hist_i.setData(x, y, stepMode=True, pen=pen,clear=False)
elif style=="Style3":
color = colors_on_table[i].color()
color.setAlpha(0.6*255.0)
pencolor = pg.mkColor(color)
brush = pg.mkBrush(color=pencolor)
hist_i.setData(x, y, stepMode=True, fillLevel=0, brush=brush,clear=False)
elif style=="Style4":
color = colors_on_table[i].color()
color.setAlpha(0.7*255.0)
pencolor = pg.mkColor(color)
brush = pg.mkBrush(color=pencolor)
hist_i.setData(x, y, stepMode=True, fillLevel=0, brush=brush,clear=False)
elif style=="Style5":
color = colors_on_table[i].color()
color.setAlpha(0.8*255.0)
pencolor = pg.mkColor(color)
brush = pg.mkBrush(color=pencolor)
hist_i.setData(x, y, stepMode=True, fillLevel=0, brush=brush,clear=False)
#Add a vertical line indicating the threshold
self.line = pg.InfiniteLine(pos=threshold, angle=90, pen='w', movable=False)
hist.addItem(self.line)
hist.setXRange(0, 1, padding=0)
def thirdplot(self):
target_index =self.spinBox_indexOfInterest.value()
cb_text = self.comboBox_3rdPlot.currentText()
if cb_text=='None':
return
if cb_text=='ROC-AUC':
#Check if self.Metrics are available
if len(self.Metrics) == 0:
self.assess_model_plotting() #run this function to create self.Metrics
dic = self.Metrics
else: #Metrics are already available, use them
dic = self.Metrics
if len(dic)==0:
return
#Get the ValidationSet
y_valid = self.ValidationSet["y_valid"] #load the validation labels to a new variable
scores = dic["scores"]
inds_uni = list(range(scores.shape[1])) #these indices are explained by model
#ROC analysis is binary; one-hot encode the labels to get one ROC curve per class (one-vs-rest):
Y_valid = np_utils.to_categorical(y_valid,num_classes=len(inds_uni))
# Compute ROC curve and ROC area for each class
fpr,tpr,roc_auc = dict(),dict(),dict()
for i in range(len(inds_uni)):
fpr[i], tpr[i], _ = metrics.roc_curve(Y_valid[:, i], scores[:, i])
roc_auc[i] = metrics.auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = metrics.roc_curve(Y_valid.ravel(), scores.ravel())
roc_auc["micro"] = metrics.auc(fpr["micro"], tpr["micro"])
#Get the user-defined colors from table
rowCount = self.tableWidget_Info_2.rowCount()
#only count rows with content
rowCount = sum([self.tableWidget_Info_2.item(row, 0)!=None for row in range(rowCount)])
colors_on_table = [self.tableWidget_Info_2.item(row, 2).background() for row in range(rowCount)]
#Clear the plot
self.widget_3rdPlot.clear()
#Add plot
hist = self.widget_3rdPlot.addPlot()
hist.showGrid(x=True,y=True)
hist.addLegend()
hist.setLabel('bottom', "False Positive Rate", units='')
hist.setLabel('left', "True Positive Rate", units='')
for i, color in zip(range(len(inds_uni)), colors_on_table):
text = 'Class '+str(i)+', AUC='+str(round(roc_auc[i],2))
hist.plot(fpr[i], tpr[i], pen=None,symbol='o',symbolPen=None,symbolBrush=color,name=text,clear=False)
clr = color.color()
hist.plot(fpr[i],tpr[i],pen=clr)
hist.setXRange(0, 1, padding=0)
if cb_text=='Precision-Recall':
#Check if self.Metrics are available
if len(self.Metrics) == 0:
self.assess_model_plotting() #run this function to create self.Metrics
dic = self.Metrics
else: #Otherwise, there are Metrics available already :) Use them
dic = self.Metrics
if len(dic)==0:
return
#Get the ValidationSet
y_valid = self.ValidationSet["y_valid"] #load the validation labels to a new variable
scores = dic["scores"]#[:,target_index]
inds_uni = list(range(scores.shape[1])) #these indices are explained by model
#Precision-recall analysis is binary; one-hot encode the labels to get one curve per class (one-vs-rest):
Y_valid = np_utils.to_categorical(y_valid,num_classes=len(inds_uni))
# Compute Precision Recall curve and P-R area for each class
precision,recall,precision_recall_auc = dict(),dict(),dict()
for i in range(len(inds_uni)):
precision[i], recall[i], _ = metrics.precision_recall_curve(Y_valid[:, i], scores[:, i])
precision_recall_auc[i] = metrics.auc(recall[i], precision[i])
# Compute micro-average precision-recall curve and area
precision["micro"], recall["micro"], _ = metrics.precision_recall_curve(Y_valid.ravel(), scores.ravel())
precision_recall_auc["micro"] = metrics.auc(recall["micro"],precision["micro"])
#Get the user-defined colors from table
rowCount = self.tableWidget_Info_2.rowCount()
#only count rows with content
rowCount = sum([self.tableWidget_Info_2.item(row, 0)!=None for row in range(rowCount)])
colors_on_table = [self.tableWidget_Info_2.item(row, 2).background() for row in range(rowCount)]
#Clear the plot
self.widget_3rdPlot.clear()
#Add plot
hist = self.widget_3rdPlot.addPlot()
hist.showGrid(x=True,y=True)
hist.addLegend()
hist.setLabel('bottom', "Recall", units='')
hist.setLabel('left', "Precision", units='')
for i, color in zip(range(len(inds_uni)), colors_on_table):
text = 'Class '+str(i)+', AUC='+str(round(precision_recall_auc[i],2))
hist.plot(recall[i],precision[i], pen=None,symbol='o',symbolPen=None,symbolBrush=color,name=text,clear=False)
clr = color.color()
hist.plot(recall[i],precision[i],pen=clr)
hist.setXRange(0, 1, padding=0)
if cb_text=='Enrichment vs. Threshold' or cb_text=='Yield vs. Threshold' or cb_text=='Conc. vs. Threshold':
#Check if self.Metrics are available
if len(self.Metrics) == 0: #if not,
self.assess_model_plotting() #run this function to create self.Metrics
dic = self.Metrics
else: #If Metrics are already available, use it. Load it
dic = self.Metrics
if len(dic)==0:
return
scores = dic["scores"]
y_valid = self.ValidationSet["y_valid"] #load the validation labels to a new variable
#The dic only contains metrics for a single threshold, which is not enough
#call aid_bin.metrics_using_threshold with a range of thresholds:
#(it might make sense to evaluate this for each possible target_index. For now, only perform the measurement for the user defined target index)
Dics,Threshs = [],[]
for thresh in np.linspace(0,1,25):
dic_ = aid_bin.metrics_using_threshold(scores,y_valid,thresh,target_index) #returns dic = {"scores":scores,"pred":pred,"conc_target_cell":conc_target_cell,"enrichment":enrichment,"yield_":yield_}
Dics.append(dic_)
Threshs.append(thresh)
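#evaluates 25 evenly spaced thresholds from 0 to 1 (step 1/24, ~0.042); each entry of Dics holds the metrics for one threshold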
#Collect information in arrays
enrichment_ = np.array([d["enrichment"] for d in Dics])
yield__ = np.array([d["yield_"] for d in Dics])
conc_target_cell = np.array([d["conc_target_cell"] for d in Dics])
Threshs = np.array(Threshs)
rowCount = self.tableWidget_Info_2.rowCount()
#only count rows with content
rowCount = sum([self.tableWidget_Info_2.item(row, 0)!=None for row in range(rowCount)])
colors_on_table = [self.tableWidget_Info_2.item(row, 2).background() for row in range(rowCount)]
#Clear the plot
self.widget_3rdPlot.clear()
#Add plot
hist = self.widget_3rdPlot.addPlot()
hist.showGrid(x=True,y=True)
hist.addLegend()
hist.setLabel('bottom', "Threshold", units='')
color = '#0000ff'
if cb_text=='Enrichment vs. Threshold':
hist.setLabel('left', "Enrichment", units='')
hist.plot(Threshs,enrichment_, pen=None,symbol='o',symbolPen=None,symbolBrush=color,name='',clear=False)
hist.plot(Threshs,enrichment_,pen=color)
if cb_text=='Yield vs. Threshold':
hist.setLabel('left', "Yield", units='')
hist.plot(Threshs,yield__, pen=None,symbol='o',symbolPen=None,symbolBrush=color,name='',clear=False)
hist.plot(Threshs,yield__,pen=color)
if cb_text=='Conc. vs. Threshold':
hist.setLabel('left', "Conc. of target cell in target region", units='')
hist.plot(Threshs,conc_target_cell, pen=None,symbol='o',symbolPen=None,symbolBrush=color,name='',clear=False)
hist.plot(Threshs,conc_target_cell,pen=color)
hist.setXRange(0, 1, padding=0)
#Add indicator for the currently used threshold
threshold = float(self.doubleSpinBox_sortingThresh.value())#threshold probability above which a cell is sorted
self.line = pg.InfiniteLine(pos=threshold, angle=90, pen='w', movable=False)
hist.addItem(self.line)
def classify(self):
#Very similar function to "Update Plot". But here, no graphs are produced
#Resulting scores/predictions etc are simply stored to excel file
#This function does NOT take labels.
#Check if a model was defined
if self.load_model_path == None:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Please define a model path first")
msg.setWindowTitle("No model path found")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
AvailableFiles = self.items_available()
rtdc_paths = [file_["rtdc_path"] for file_ in AvailableFiles]
#Classify all datasets or just one?
Files,FileIndex = [],[]
if self.radioButton_selectAll.isChecked():
Files = rtdc_paths
FileIndex = list(range(len(Files)))
if len(Files)==0:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("There are no files on the 'Build'-Tab")
msg.setWindowTitle("No files found")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
elif self.radioButton_selectDataSet.isChecked():
rtdc_path = self.comboBox_selectData.currentText()
Files.append(rtdc_path)
#get the index of this file on the table
FileIndex = [int(self.comboBox_selectData.currentIndex())]
#FileIndex = list(np.where(np.array(rtdc_path)==np.array(rtdc_paths))[0])
else:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("Please use the radiobuttons (left) to indicate if all or only a selected file should be classified.")
msg.setWindowTitle("No file(s) specified")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
return
print("Chosen file(s):")
print(Files)
#what input size is required by loaded model?
crop = int(self.spinBox_Crop_2.value())
norm = str(self.comboBox_Normalization_2.currentText())
paddingMode = str(self.comboBox_paddingMode.currentText())
color_mode = self.get_color_mode()
zoom_factors = [selectedfile["zoom_factor"] for selectedfile in AvailableFiles]
#zoom_order = [self.actionOrder0.isChecked(),self.actionOrder1.isChecked(),self.actionOrder2.isChecked(),self.actionOrder3.isChecked(),self.actionOrder4.isChecked(),self.actionOrder5.isChecked()]
#zoom_order = int(np.where(np.array(zoom_order)==True)[0])
zoom_order = int(self.comboBox_zoomOrder.currentIndex()) #the combobox-index is already the zoom order
xtra_in = set([selectedfile["xtra_in"] for selectedfile in AvailableFiles])
if len(xtra_in)>1:# both False and True are present. Not supported
print("Xtra data is used only for some files. Xtra data needs to be used either by all or by none!")
return
xtra_in = list(xtra_in)[0]#this is either True or False
#if normalization method needs mean/std of training set, the metafile needs to be loaded:
if norm == "StdScaling using mean and std of all training data":
modelindex = int(self.spinBox_ModelIndex_2.value())
path,fname = os.path.split(self.load_model_path)
fname = fname.split(str(modelindex)+".model")[0]+"meta.xlsx"
metafile_path = os.path.join(path,fname)
parameters = pd.read_excel(metafile_path,sheet_name='Parameters')
mean_trainingdata = parameters["Mean of training data used for scaling"]
std_trainingdata = parameters["Std of training data used for scaling"]
else:
mean_trainingdata = None
std_trainingdata = None
#Create config (define which device to use)
if self.radioButton_cpu.isChecked():
deviceSelected = str(self.comboBox_cpu.currentText())
elif self.radioButton_gpu.isChecked():
deviceSelected = str(self.comboBox_gpu.currentText())
gpu_memory = float(self.doubleSpinBox_memory.value())
config_gpu = aid_dl.get_config(cpu_nr,gpu_nr,deviceSelected,gpu_memory)
with tf.Session(graph = tf.Graph(), config=config_gpu) as sess:
model_keras = load_model(self.load_model_path,custom_objects=aid_dl.get_custom_metrics())
in_dim = model_keras.get_input_shape_at(node_index=0)
#Get the color mode of the model
channels_model = in_dim[-1]
if channels_model==1:
color_mode='Grayscale'
elif channels_model==3:
color_mode='RGB'
else:
print("Invalid number of channels. AID only supports grayscale (1 channel) and RGB (3 channels) images.")
#Get the user-set export option (Excel or to 'userdef0' in .rtdc file)
export_option = str(self.comboBox_scoresOrPrediction.currentText())
if export_option == "Add predictions to .rtdc file (userdef0)" or export_option=="Add pred&scores to .rtdc file (userdef0 to 9)":
#Users sometimes need to have Donor-ID (Parent foldername) added to the .rtdc file
#Ask the user: Do you want to get a specific fixed addon to filename, OR do you want to have the parent-foldername added?
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Question)
text = "Do you want to get a specific fixed addon to filename, <b>or do you want to have the parent-foldername added for each file individually?"
text = "<html><head/><body><p>"+text+"</p></body></html>"
msg.setText(text)
msg.setWindowTitle("Filename-addon for created files")
msg.addButton(QtGui.QPushButton('Specific fixed addon...'), QtGui.QMessageBox.YesRole)
msg.addButton(QtGui.QPushButton('Parent foldername'), QtGui.QMessageBox.NoRole)
msg.addButton(QtGui.QPushButton('Cancel'), QtGui.QMessageBox.RejectRole)
retval = msg.exec_()
if retval==0:
#Get some user input:
fname_addon, ok = QtWidgets.QInputDialog.getText(self, 'Specific fixed addon...', 'Enter filname addon:')
if ok:
fname_addon = str(fname_addon)
else:
return
elif retval==1:
fname_addon = "Action:GetParentFolderName!"
else:
return
#Iterate over all Files
for iterable in range(len(Files)):#rtdc_path in Files:
print("Files:"+str(Files))
print("iterable:"+str(iterable))
rtdc_path = Files[iterable]
print("rtdc_path:"+str(rtdc_path))
print("FileIndex:"+str(FileIndex))
print("zoom_factors:"+str(zoom_factors))
f_index = FileIndex[iterable]
zoom_factor = zoom_factors[f_index]
#get all images, cropped correctly
gen_train = aid_img.gen_crop_img(crop,rtdc_path,replace=True,random_images=False,zoom_factor=zoom_factor,zoom_order=zoom_order,color_mode=color_mode,padding_mode=paddingMode,xtra_in=xtra_in)
x_train,index,xtra_train = next(gen_train) #x_train-images of all cells, index-original index of all cells
if norm == "StdScaling using mean and std of all training data":
x_train = aid_img.image_normalization(x_train,norm,mean_trainingdata,std_trainingdata)
else:
x_train = aid_img.image_normalization(x_train,norm)
#Check the input dimensions:
img_dim = x_train.shape[-2]
model_in = int(self.spinBox_Crop_2.value())
if model_in!=img_dim:
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setText("New model has different input dimensions (image crop). Validation set is re-loaded (clicked files on build-tab)")
msg.setWindowTitle("Input dimensions not fitting")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.exec_()
scores = model_keras.predict(x_train)
scores_normal = np.copy(scores)
pred_normal = np.argmax(scores_normal,axis=1)
#Get settings from the GUI
threshold = float(self.doubleSpinBox_sortingThresh.value())#threshold probability above which a cell is sorted
target_index = int(self.spinBox_indexOfInterest.value())#index of the cell type that should be sorted for
#Use argmax for prediction (threshold can only be applied to one index)
pred_normal = np.argmax(scores,axis=1)
#First: take the scores for the sorting index and binarize them using the threshold
pred_thresh = np.array([1 if p>threshold else 0 for p in scores[:,target_index]])
#replace the corresponding column in the scores
scores[:,target_index] = pred_thresh
#Determine the prediction again, considering the threshold for the target index
pred_thresh = np.argmax(scores,axis=1)
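#e.g. a scores row [0.2,0.7,0.1] with target_index=1 and threshold=0.8: 0.7 is not >0.8 -> column 1 becomes 0 -> argmax now returns class 0 instead of class 1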
normal_or_thresh = bool(self.checkBox_SortingThresh.isChecked())
if normal_or_thresh==True: #(if True, use the normal argmax prediction, i.e. p=0.5)
prediction_to_rtdc_ds = pred_normal
if normal_or_thresh==False: #(if False, use the prediction thresholded for the chosen class)
prediction_to_rtdc_ds = pred_thresh
if export_option == "Scores and predictions to Excel sheet":
info = np.array([[self.load_model_path],[rtdc_path],[target_index],[threshold]]).T
info = pd.DataFrame(info,columns=["load_model_path","rtdc_path","target_class","threshold"])
#Combine all information in nice excel sheet
filename = rtdc_path.split(".rtdc")[0]+"_Prediction.xlsx"
writer = pd.ExcelWriter(filename, engine='openpyxl')
#Used files go to a separate sheet on the -session.xlsx
pd.DataFrame().to_excel(writer,sheet_name='Info') #initialize empty Sheet
info.to_excel(writer,sheet_name='Info')
pd.DataFrame().to_excel(writer,sheet_name='Scores_normal') #initialize empty Sheet
pd.DataFrame(scores_normal).to_excel(writer,sheet_name='Scores_normal')
pd.DataFrame().to_excel(writer,sheet_name='Prediction_normal') #initialize empty Sheet
pd.DataFrame(pred_normal).to_excel(writer,sheet_name='Prediction_normal')
pd.DataFrame().to_excel(writer,sheet_name='Scores_thresholded') #initialize empty Sheet
pd.DataFrame(scores).to_excel(writer,sheet_name='Scores_thresholded')
pd.DataFrame().to_excel(writer,sheet_name='Prediction_thresholded') #initialize empty Sheet
| pd.DataFrame(pred_thresh) | pandas.DataFrame |
#python imports
import os
import gc
import string
import random
import time
import pickle
import shutil
from datetime import datetime
#internal imports
from modules.Signal import Signal
from modules.Database import Database
from modules.Predictor import Classifier, ComplexBuilder
from modules.utils import calculateDistanceP, chunks, cleanPath, minMaxNorm, extractMeanByBounds, extractMetricByShiftBounds
import joblib
from joblib import Parallel, delayed, dump, load
import pandas as pd
import numpy as np
from collections import OrderedDict
from itertools import combinations
from multiprocessing import Pool, Value
from joblib import wrap_non_picklable_objects
#plotting
import matplotlib.pyplot as plt
import seaborn as sns
#sklearn imports
from sklearn.metrics import classification_report, homogeneity_score, v_measure_score, completeness_score
from sklearn.model_selection import ParameterGrid
from sklearn.neural_network import MLPClassifier
from sklearn.linear_model import LinearRegression
from sklearn.neighbors import RadiusNeighborsRegressor, KNeighborsRegressor
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import scale, minmax_scale, robust_scale
from scipy.stats import ttest_ind, f_oneway
#dimensional reduction
import umap
__VERSION__ = "0.4.48"
filePath = os.path.dirname(os.path.realpath(__file__))
pathToTmp = os.path.join(filePath,"tmp")
alignModels = { "LinearRegression": LinearRegression,
"RadiusNeighborsRegressor" : RadiusNeighborsRegressor,
"KNeighborsRegressor":KNeighborsRegressor
}
alignModelsParams = {
"LinearRegression": {},
"RadiusNeighborsRegressor" : {"weights":"distance","radius":30} ,
"KNeighborsRegressor":{"weights":"distance","n_neighbors":10}
}
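#Presumably (assumption, not shown here): the chosen alignMethod is looked up in alignModels and the class is
#instantiated with the matching kwargs from alignModelsParams, e.g. RadiusNeighborsRegressor(weights="distance", radius=30).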
STACKING_CLASSIFIER_GRID = {
'rf__max_depth': [70,None,30],#30,,
#'rf__max_features': ['auto'],
# 'rf__min_samples_leaf': [2, 3, 5],
'rf__min_samples_split': [2,4],#[2, 3, 4],
#'rf__n_estimators': [200],
"SVM__C" : [1, 10,1000],
"SVM__kernel": ['rbf','poly'],
'SVM__gamma': [0.01,10,100]
}
OPTICS_PARAM_GRID = {
"min_samples":[2,3,5,8],
"max_eps": [np.inf,2,1,0.9,0.8],
"xi": np.linspace(0,0.3,num=30),
"cluster_method" : ["xi"]
}
AGGLO_PARAM_GRID = {
"n_clusters":[None,115,110,105,100,90,95],
"distance_threshold":[None,0.5,0.4,0.2,0.1,0.05,0.01],
"linkage":["complete","single","average"]
}
AFF_PRO_PARAM = {"damping":np.linspace(0.5,1,num=50)}
HDBSCAN_PROPS = {
"min_cluster_size":[2,3,4,6],
"min_samples":[2,3,4,5]
}
#{"min_cluster_size":[2,3,4,6],"min_samples":[2,3,4,5,8,10]}
CLUSTER_PARAMS = {
"OPTICS":OPTICS_PARAM_GRID,
"AGGLOMERATIVE_CLUSTERING":AGGLO_PARAM_GRID,
"AFFINITY_PROPAGATION":AFF_PRO_PARAM,
"HDBSCAN":HDBSCAN_PROPS
}
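#Illustrative sketch (assumption; not necessarily how ComplexFinder consumes these grids internally):
#    from sklearn.model_selection import ParameterGrid  #already imported above
#    for params in ParameterGrid(CLUSTER_PARAMS["HDBSCAN"]):
#        ...  #params is e.g. {"min_cluster_size": 2, "min_samples": 2}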
svm_param_grid = {
'C': [1, 10, 100, 1000],
'kernel': ['linear','rbf','poly'],
'gamma': [0.01,0.1,1,2,3,4,5]
}
RF_GRID_SEARCH = {
'max_depth': [70,None,30,50,10],#30,,,50,5
'max_features': ['auto'],
'min_samples_leaf': [2,5,3,15], # 5, 15
'min_samples_split': [2 ,3,10],
'n_estimators': [300, 500, 600]
}
entriesInChunks = dict()
class ComplexFinder(object):
def __init__(self,
addImpurity = 0.0,
alignMethod = "RadiusNeighborsRegressor",#"RadiusNeighborsRegressor",#"KNeighborsRegressor",#"LinearRegression", # RadiusNeighborsRegressor
alignRuns = False,
alignWindow = 3,
allowSingleFractionQuant = False,
analysisMode = "label-free", #[label-free,SILAC,SILAC-TMT]
analysisName = None,
binaryDatabase = False,
classifierClass = "random_forest",
classifierTestSize = 0.25,
classiferGridSearch = RF_GRID_SEARCH,#STACKING_CLASSIFIER_GRID,#
compTabFormat = False,
considerOnlyInteractionsPresentInAllRuns = 2,
correlationWindowSize = 5,
databaseFilter = {'Organism': ["Human"]},#{'Organism': ["Human"]},#{"Confidence" : [1,2,3,4]} - for hu.map2.0,# {} for HUMAN_COMPLEX_PORTAL
databaseIDColumn = "subunits(UniProt IDs)",
databaseFileName = "20190823_CORUM.txt",#"humap2.txt
databaseHasComplexAnnotations = True,
databaseEntrySplitString = ";",
decoySizeFactor = 1.2,
grouping = {"WT": ["D3_WT_03.txt"]},
hdbscanDefaultKwargs = {"min_cluster_size":4,"min_samples":1},
indexIsID = False,
idColumn = "Uniprot ID",
interactionProbabCutoff = 0.7,
justFitAndMatchPeaks = False,
keepOnlySignalsValidInAllConditions = False,
kFold = 3,
maxPeaksPerSignal = 15,
maxPeakCenterDifference = 1.8,
metrices = ["apex","pearson","euclidean","cosine","max_location","rollingCorrelation"], #"umap-dist"
metricesForPrediction = None,#["pearson","euclidean","apex"],
metricQuantileCutoff = 0.001,
minDistanceBetweenTwoPeaks = 3,
minimumPPsPerFeature = 6,
minPeakHeightOfMax = 0.05,
n_jobs = 12,
noDatabaseForPredictions = False,
normValueDict = {},
noDistanceCalculationAndPrediction = False,
peakModel = "LorentzianModel",#"GaussianModel",#"SkewedGaussianModel",#"LorentzianModel",
plotSignalProfiles = False,
plotComplexProfiles = False,
precision = 0.5,
r2Thresh = 0.85,
removeSingleDataPointPeaks = True,
restartAnalysis = False,
retrainClassifier = False,
recalculateDistance = False,
rollingWinType = None,
runName = None,
scaleRawDataBeforeDimensionalReduction = True,
smoothSignal = True,
smoothWindow = 2,
takeRondomSampleFromData =False,
topNCorrFeaturesForUMAPAlignment = 200,
TMTPoolMethod = "sum",
transformQuantDataBy = None,
useRawDataForDimensionalReduction = False,
useFWHMForQuant = True,
umapDefaultKwargs = {"min_dist":0.001,"n_neighbors":5,"n_components":2,"random_state":120},
quantFiles = [],
usePeakCentricFeatures = False
):
"""
Init ComplexFinder Class
Parameters
----------
* alignMethod = "RadiusNeighborsRegressor",
* alignRuns = False,
Alignment of runs is based on signal profiles that were found to have
a single modelled peak. A reference run is assigned by correlation analysis
and chosen based on a maximum R2 value. Then the fraction-shift per signal
profile is calculated (must be in the window given by *alignWindow*).
The fraction residuals are then modelled using the method provided in
*alignMethod*. Model peak centers are then adjusted based on the regression results.
Of note, the alignment is performed after peak-modelling and before distance calculations.
* alignWindow = 3,
Number of fractions +/- the single-peak profile that are accepted for the run alignment.
* analysisMode = "label-free", #[label-free,SILAC,SILAC-TMT]
* analysisName = None,
* binaryDatabase = False,
* classifierClass = "random_forest",
* classifierTestSize = 0.25,
Fraction of the created database containing positive and negative protein-protein
interactions that will be used for testing (for example ROC curve analysis) and classification report.
* classiferGridSearch = RF_GRID_SEARCH.
Dict with keywords matching parameters/settings of estimator (SVM, random forest)
and list of values forming the grid used to find the best estimator settings (evaluated
by k-fold cross validation). Runtime is affected by the number of parameter settings as well as by k-fold.
* compTabFormat = False
True indicates that the data are in the compTab data format which was recently introduced.
In contrast to standard txt files generated by for example MaxQuant. It contains multiple
headers. More information can be found here https://www3.cmbi.umcn.nl/cedar/browse/comptab
ComplexFinder will try to identifiy the samples and fractions and create separeted txt files.
* considerOnlyInteractionsPresentInAllRuns = 2,
Can be either a bool, to filter for protein-protein interactions that are present
in all runs, or an integer. If an integer is provided, the pp interactions are filtered based on
the number of runs in which they were quantified. A value of 4 would indicate that
the pp interaction must have been predicted in at least 4 runs.
* correlationWindowSize = 5,
Number of fractions used for the rolling Pearson correlation.
* databaseFilter = {'Organism': ["Human"]},
Filter dict used to find relevant complexes from the database. By default,
the CORUM database is filtered based on the column 'Organism' using 'Human' as a search string.
If no filtering is required, pass an empty dict {}.
* databaseIDColumn = "subunits(UniProt IDs)",
* databaseFileName = "20190823_CORUM.txt",
* databaseHasComplexAnnotations = True,
Indicates if the provided database does contain complex annotations. If you have a database with
only pairwise interactions, this setting should be *False*. Clusters are identified by dimensional
reduction and density based clustering (HDBSCAN). In order to alter UMAP and HDBSCAN settings use the
keywords *hdbscanDefaultKwargs* and *umapDefaultKwargs*.
* databaseEntrySplitString = ";",
String by which complex members are separated in the provided database. CORUM = ";", Embl ComplexMap = "|"
* decoySizeFactor = 1.2,
Size factor for creating the decoy database from a positive protein connectivity database such as CORUM.
* grouping = {"WT": ["D3_WT_04.txt","D3_WT_02.txt"],"KO":["D3_KO_01.txt","D3_KO_02.txt"]},
None or dict. Indicates which samples (file) belong to one group. Let's assume 4 files with the name
'KO_01.txt', 'KO_02.txt', 'WT_01.txt' and 'WT_02.txt' are being analysed.
The grouping dict should look like this: {"KO":['KO_01.txt','KO_02.txt'],"WT":['WT_01.txt','WT_02.txt']}
in order to combine them for statistical testing (e.g. t-test of log2 transformed peak-AUCs).
Note that when analysing multiple runs (e.g. grouping present), calling ComplexFinder().run(X) requires that X be a
path to a folder containing the files.
When using compTabFormat = True, provide the sample name as <compTabFileName>:<SampleName>.
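For example (file and sample names are placeholders): grouping = {"WT" : ["myCompTabFile.txt:WT_01", "myCompTabFile.txt:WT_02"]}.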
* hdbscanDefaultKwargs = {"min_cluster_size":4,"min_samples":1},
* indexIsID = False,
* idColumn = "Uniprot ID",
* interactionProbabCutoff = 0.7
Cutoff for estimator probability. Interactions with probabilities below threshold will be removed.
* keepOnlySignalsValidInAllConditions = False
If True, removes all Signals that were not found to be valid in all files (experiments).
* kFold = 3
Number of folds for the cross validation used during classifier optimization.
* justFitAndMatchPeaks = False
If true, the pipeline stops after peak detection/model fitting and matching of peaks (if more than one file is supplied.)
* maxPeaksPerSignal = 15
Number of peaks allowed for one signal profile.
* maxPeakCenterDifference = 1.8
* metrices = ["apex","pearson","euclidean","p_pearson","max_location","umap-dist","rollingCorrelation"], Metrices to access distance between two profiles. Can be either a list of strings and/or dict. In case of a list of dicts, each dict must contain the keywords: 'fn' and 'name' providing a callable function with 'fn' that returns a single floating number and takes two arrays as an input.
* metricesForPrediction = None
* metricQuantileCutoff = 0.90
* minDistanceBetweenTwoPeaks = 3
Distance in fractions (int) between two peaks. Setting this to a smaller number results in more peaks.
* n_jobs = 12,
Number of workers used to model peaks, to calculate distance pairs and to train and use the classifier.
* noDatabaseForPredictions = False,
If you want to use ComplexFinder without any database, set this to *True*.
* normValueDict = {},
* noDistanceCalculationAndPrediction = False,
Set to *True* to use ComplexFinder without distance calculation and database prediction.
* peakModel = "GaussianModel",
Indicates which model should be used to model signal profiles. In principle all models from lmfit can be used.
However, the initial parameters are only optimized for GaussianModel and LaurentzianModel.
This might effect runtimes dramatically.
* plotSignalProfiles = False,
If True, each profile is plotted against the fractions along with the fitted models.
If you are concerned about runtime, you might set this to False at the cost of losing visual assessment of the fit quality.
* plotComplexProfiles = False,
* precision = 0.5
Precision to use to filter protein-protein interactions.
If None, the filtering will be performed based on the parameter *interactionProbabCutoff*.
* r2Thresh = 0.85
R2 threshold to accept a model fit. Models below the threshold will be ignored.
* removeSingleDataPointPeaks = True,
* restartAnalysis = False, bool.
Set True if you want to restart the analysis from scratch. If the tmp folder exists, its items and dirs will be deleted first.
* retrainClassifier = False,
If False and the trainedClassifier.sav file is found, the classifier is loaded and the training is skipped.
If you change the classiferGridSearch, you should set this to True.
This ensures that the classifier training is not skipped.
* recalculateDistance = False,
* rollingWinType = None,
If None, all points are evenly weighted. Can be any string of scipy.signal window function.
(https://docs.scipy.org/doc/scipy/reference/signal.windows.html#module-scipy.signal.windows)
* runName = None,
* <del>savePeakModels = True</del> *deprecated. Always True and will be removed in the next version*.
* scaleRawDataBeforeDimensionalReduction = True,
If raw data should be used (*useRawDataForDimensionalReduction*)
enable this if you want to scale them. Scaling is performed so that the values of each row lie between zero and one.
* smoothSignal = True
Enable/disable smoothing. Defaults to True. A moving average of at least 3 adjacent datapoints is calculated using
the pandas rolling function. Affects the analysis time as well as the maximal number of peaks detected.
* smoothWindow = 2,
* topNCorrFeaturesForUMAPAlignment = 200,
Number of profiles used for UMAP alignment. Only used if useRawDataForDimensionalReduction = True or noDistanceCalculationAndPrediction = True.
The features are identified by calculating the Pearson correlation coefficient.
* useRawDataForDimensionalReduction = False, Setting this to True will force the pipeline to use the raw values for dimensional reduction. Distance calculations are not automatically turned off; the output is generated but not used.
* useFWHMForQuant = True
If quantFiles is specified, the FWHM is used for peak centric quantification. By default, at least the mean over the peak center +/- 1 fraction will
be considered (i.e. 3 fractions). However, you can allow single fraction quantification for narrow peaks by setting 'allowSingleFractionQuant' to True.
* umapDefaultKwargs = {"min_dist":0.0000001,"n_neighbors":3,"n_components":2},
If you want to perform an aligned UMAP, consider altering the parameters alignment_window_size and alignment_regularisation. Find more information here
(https://umap-learn.readthedocs.io/en/latest/aligned_umap_basic_usage.html#aligning-varying-parameters)
* quantFiles = dict
* Quantification files. Dict with the name of the co-fraction file as key and the path to the quantification file as value.
Assuming your grouping is something like: {"WT":["WT_01.txt","WT_02.txt"]}. Then the quantification files must
contain a key for each file: something like {"WT_01.txt":"myCoolProject/quant/WT01_quant.txt","WT_02.txt":"myCoolProject/quant/WT02_quant.txt"}.
Assuming the folder myCoolProject/ exists where the main file is.
If analysing a TMT-SILAC experiment it is required to provide TMT labelings for heavy and light peaks separately, the
provided dict should look something like this:
{
"HEAVY_WT_01.txt":"myCoolProject/quant/WT01_quant_heavy.txt",
"LIGHT_WT_01.txt":"myCoolProject/quant/WT01_quant_light.txt"
}
Returns
-------
None
"""
self.params = {
"addImpurity" : addImpurity,
"indexIsID" : indexIsID,
"idColumn" : idColumn,
"n_jobs" : n_jobs,
"kFold" : kFold,
"analysisName" : analysisName,
"restartAnalysis" : restartAnalysis,
"metrices" : metrices,
"peakModel" : peakModel,
"smoothWindow" : smoothWindow,
"classifierClass" : classifierClass,
"retrainClassifier" : retrainClassifier,
"interactionProbabCutoff":interactionProbabCutoff,
"maxPeaksPerSignal" : maxPeaksPerSignal,
"maxPeakCenterDifference" : maxPeakCenterDifference,
"classiferGridSearch" : classiferGridSearch,
"plotSignalProfiles" : plotSignalProfiles,
"savePeakModels" : True, #must be true to process pipeline, depracted, remove from class arguments.
"removeSingleDataPointPeaks" : removeSingleDataPointPeaks,
"grouping" : grouping,
"analysisMode" : analysisMode,
"normValueDict" : normValueDict,
"databaseFilter" : databaseFilter,
"databaseIDColumn" : databaseIDColumn,
"databaseFileName" : databaseFileName,
"databaseHasComplexAnnotations" : databaseHasComplexAnnotations,
"r2Thresh" : r2Thresh,
"smoothSignal" : smoothSignal,
"umapDefaultKwargs" : umapDefaultKwargs,
"hdbscanDefaultKwargs" : hdbscanDefaultKwargs,
"noDatabaseForPredictions" : noDatabaseForPredictions,
"alignRuns" : alignRuns,
"alignMethod" : alignMethod,
"runName" : runName,
"useRawDataForDimensionalReduction" : useRawDataForDimensionalReduction,
"scaleRawDataBeforeDimensionalReduction" : scaleRawDataBeforeDimensionalReduction,
"metricQuantileCutoff": metricQuantileCutoff,
"recalculateDistance" : recalculateDistance,
"metricesForPrediction" : metricesForPrediction,
"minDistanceBetweenTwoPeaks" : minDistanceBetweenTwoPeaks,
"minimumPPsPerFeature" : minimumPPsPerFeature,
"plotComplexProfiles" : plotComplexProfiles,
"decoySizeFactor" : decoySizeFactor,
"classifierTestSize" : classifierTestSize,
"considerOnlyInteractionsPresentInAllRuns" : considerOnlyInteractionsPresentInAllRuns,
"precision" : precision,
"quantFiles" : quantFiles,
"compTabFormat" : compTabFormat,
"correlationWindowSize" : correlationWindowSize,
"takeRondomSampleFromData" : takeRondomSampleFromData,
"minPeakHeightOfMax" : minPeakHeightOfMax,
"justFitAndMatchPeaks" : justFitAndMatchPeaks,
"keepOnlySignalsValidInAllConditions" : keepOnlySignalsValidInAllConditions,
"noDistanceCalculationAndPrediction" : noDistanceCalculationAndPrediction,
"topNCorrFeaturesForUMAPAlignment" : topNCorrFeaturesForUMAPAlignment,
"databaseEntrySplitString": databaseEntrySplitString,
"version" : __VERSION__,
"usePeakCentricFeatures" : usePeakCentricFeatures,
"allowSingleFractionQuant" : allowSingleFractionQuant,
"useFWHMForQuant" : useFWHMForQuant,
"TMTPoolMethod" : TMTPoolMethod,
"transformQuantDataBy" : transformQuantDataBy
}
print("\n" + str(self.params))
self._checkParameterInput()
def _addMetricesToDB(self,analysisName):
"""
Adds distance metrices to the database entries
that were found in the co-elution profiles.
Parameters
----------
Returns
-------
None
"""
if self.params["noDistanceCalculationAndPrediction"]:
print("Info :: Skipping matching metrices to DB.")
return
if "signalDiff" in self.params["metrices"]:
self.params["metrices"] = [x for x in self.params["metrices"] if x != "signalDiff"] + ["{}-diff".format(x) for x in np.arange(self.Xs[analysisName].columns.size)]
metricColumns = self.params["metrices"]
if not self.params["noDatabaseForPredictions"]:
self.DB.matchMetrices(self.params["pathToTmp"][analysisName],entriesInChunks[analysisName],metricColumns,analysisName,forceRematch=self.params["recalculateDistance"])
def _addMetricToStats(self,metricName, value):
"""
Adds a metric to the stats data frame.
Does not check if the metric is already present; if present,
it will just be overwritten.
Parameters
----------
metricName str
Name of metric to add
value str
Value of metric
Returns
-------
None
"""
if metricName in self.stats.columns:
self.stats.loc[self.currentAnalysisName,metricName] = value
def _addModelToSignals(self,signalModels):
"""
Adds fitted models to Signals. If no valid
model was found, the signal profile is removed.
Parameters
----------
signalModels - list
List of modelfits (dict)
Returns
-------
None
"""
for fitModel in signalModels:
modelID = fitModel["id"]
if len(fitModel) == 1:
del self.Signals[self.currentAnalysisName][modelID]
if modelID in self.Signals[self.currentAnalysisName]:
for k,v in fitModel.items():
if k != 'id':
setattr(self.Signals[self.currentAnalysisName][modelID],k,v)
self.Signals[self.currentAnalysisName][modelID].saveResults()
def _attachQuantificationDetails(self, combinedPeakModels = None):
"""
"""
if self.params["analysisMode"] == "label-free":
if len(self.params["quantFiles"]) != 0:
print("Warning :: Quant files have been specified but anaylsis mode is label-free. Please define SILAC or TMT or SILAC-TMT")
print("Info :: Label-free mode selected. No additation quantification performed..")
return
if len(self.params["quantFiles"]) > 0:
files = np.array(list(self.params["grouping"].values())).flatten()
print(files)
print(self.params["quantFiles"].keys())
if len(self.params["quantFiles"]) != files.size and self.params["analysisMode"] != "SILAC-TMT":
print("Warning :: Different number of quantFiles and groupings provided.")
if self.params["analysisMode"] != "SILAC-TMT":
initFilesFound = [k for k in self.params["quantFiles"].keys() if k in files]
else:
print(self.params["quantFiles"])
for k in self.params["quantFiles"].keys():
print(k.split("HEAVY_",maxsplit=1))
initFilesFound = [k for k in self.params["quantFiles"].keys() if k.split("HEAVY_",maxsplit=1)[-1] in files or k.split("LIGHT_",maxsplit=1)[-1] in files]
print("Info :: For the following files and correpsonding co-elution profile data was detected")
print(initFilesFound)
print("Warning :: other files will be ignored.")
# elif self.params["analysisMode"] == "SILAC-TMT":
# if not all(f.startswith("HEAVY") or f.startswith("LIGHT") for f in self.params["quantFiles"].keys()):
# print("Warning :: If using a SILAC-TMT experiment, please provide 'HEAVY' and 'LIGHT' before the file in the dict 'quantFile' such as 'HEAVY_WT_01.txt':<path to quant file> as well as 'LIGHT_WT_01.txt':<path to quant file>")
print("combining Peaks!!")
if combinedPeakModels is None:
## load combined peak results
txtOutput = os.path.join(self.params["pathToComb"],"CombinedPeakModelResults.txt")
if os.path.exists(txtOutput):
combinedPeakModels = pd.read_csv(txtOutput,sep="\t")
else:
print("Warning :: Combined peak model reuslts not found. Deleted? Skipping peak centric quantification.")
return
print("Info :: Starting peak centric quantification. In total {} peaks were found".format(combinedPeakModels.index.size))
print("Info :: Loading quantification files.")
if not all(os.path.exists(pathToQuantFile) for pathToQuantFile in self.params["quantFiles"].values()):
print("Warning :: Not all quant files found!")
if self.params["analysisMode"] != "SILAC-TMT":
print(self.params["quantFiles"].values())
path = list(self.params["quantFiles"].values())
print(os.path.abspath(path[0]))
quantFilesLoaded = [(k,pd.read_csv(v,sep="\t",index_col = 0),False) for k,v in self.params["quantFiles"].items() if os.path.exists(v) and k in initFilesFound]
else:
quantFilesLoaded = [(k.split("HEAVY_",maxsplit=1)[-1] if "HEAVY" in k else k.split("LIGHT_",maxsplit=1)[-1],pd.read_csv(v,sep="\t",index_col = 0),"LIGHT" in k) for k,v in self.params["quantFiles"].items() if os.path.exists(v) and k in initFilesFound]
if len(quantFilesLoaded) == 0:
print("Warning :: No quant files found. Skipping peak-centric quantification.")
return
if self.params["analysisMode"] == "SILAC":
print("Info :: Peak centric quantification using SILAC :: Assuming one SILAC ratio per fraction .")
elif self.params["analysisMode"] == "TMT":
print("Info :: Peak centric quantification using TMT :: Assuming the following order:")
print("Ignoring column headers, just uses the column index as follow..")
print("Fraction 1 - TMT reporter 1, Fraction 1 - TMT reporter 2, Faction 2 - TMT reporter 3 .... Fraction 2 - TMT reporter 1")
extractedQuantFiles = []
for k,quantFile,isLightQuantData in quantFilesLoaded:
print("Info :: Quantification of ", k)
centerColumnName = "Center_{}".format(k)
fwhmColumnName = "fwhm_{}".format(k)
quantFileName = "Q({})".format(k)
combinedPeakModelsFiltered = combinedPeakModels.dropna(subset=[centerColumnName])
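#quantification window per peak: center +/- FWHM/1.7 (roughly +/- 1.4 sigma for a Gaussian-shaped peak)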
lowerBound = combinedPeakModelsFiltered[centerColumnName] - combinedPeakModelsFiltered[fwhmColumnName]/1.7
upperBound = combinedPeakModelsFiltered[centerColumnName] + combinedPeakModelsFiltered[fwhmColumnName]/1.7
peakBounds = np.concatenate([lowerBound.values.reshape(-1,1),upperBound.values.reshape(-1,1)],axis=1)
peakBounds[:,1] += 1 #add one extra to use bounds as a range in python
#check bounds
peakBounds[peakBounds[:,0] < 0, 0] = 0
peakBounds[peakBounds[:,1] >= quantFile.columns.size, 1] = quantFile.columns.size - 1
#transform bounds to ints
peakBounds = np.around(peakBounds,0).astype(np.int64)
quantData = quantFile.loc[combinedPeakModelsFiltered["Key"].values].values
if self.params["analysisMode"] == "SILAC":
print("Info :: Peak centric quantification using SILAC :: extracting mean from file {}.".format(k))
out = extractMeanByBounds(
NPeakModels = combinedPeakModelsFiltered.index.size,
peakBounds = peakBounds,
quantData = quantData
)
quantColumnNames = ["SILAC({})_Mean".format(quantFileName),"SILAC({})_Error".format(quantFileName)]
print(out)
print(quantColumnNames)
dfResult = pd.DataFrame(out,index=combinedPeakModelsFiltered.index, columns = quantColumnNames)
dfResult = dfResult.join(pd.DataFrame(peakBounds,index=combinedPeakModelsFiltered.index, columns = ["SILAC({})_LowerBound".format(quantFileName),"SILAC({})_UpperBound".format(quantFileName)]))
extractedQuantFiles.append(dfResult)
elif self.params["analysisMode"] == "TMT":
print("Info :: Peak centric quantification using TMT :: extracting sum from TMT reporters using file {}".format(self.params["quantFiles"][k]))
print("Info :: Detecting reporter channles..")
nFractions = self.Xs[k].shape[1]
nTMTs = quantData.shape[1] / nFractions
print("Info :: {} reporter channels detected and {} fractions.".format(nTMTs,nFractions))
if nTMTs != int(nTMTs):
print("Warning :: Could not detect the number of TMT reporter channles. Please check columns in quantFiles to have nTMTx x fractions columns")
continue
nTMTs = int(nTMTs)
out = extractMetricByShiftBounds(
NPeakModels = combinedPeakModels.index.size,
peakBounds = peakBounds,
quantData = quantData,
shift = nTMTs,
nFractions = nFractions
)
quantColumnNames = ["TMT({})_intensity_{}".format(quantFileName,n) for n in range(nTMTs)] #note: column naming here is an assumption; the original left this list empty, which would fail when building the DataFrame below
dfResult = pd.DataFrame(out,index=combinedPeakModels.index, columns = quantColumnNames)
extractedQuantFiles.append(dfResult)
elif self.params["analysisMode"] == "SILAC-TMT":
print("Info :: Extracting quantification details from SILAC-TMT data.")
print("Info :: Detecting reporter channles..")
nFractions = self.Xs[k].shape[1]
nTMTs = quantData.shape[1] / nFractions
print("Info :: {} reporter channels detected and {} fractions.".format(nTMTs,nFractions))
if nTMTs != int(nTMTs):
print("Warning :: Could not detect the number of TMT reporter channles. Please check columns in quantFiles to have nTMTx x fractions columns")
continue
nTMTs = int(nTMTs)
# print(peakBounds)
# print(combinedPeakModels["Key"])
# print(isLightQuantData)
quantData[quantData == 0.0] = np.nan
out = extractMetricByShiftBounds(
NPeakModels = combinedPeakModels.index.size,
peakBounds = peakBounds,
quantData = quantData,
shift = nTMTs,
nFractions = nFractions
)
#print(out)
if isLightQuantData:
quantColumnNames = ["L_({})_tmt_intensity_{}".format(k,n) for n in range(nTMTs)]
else:
quantColumnNames = ["H_({})_tmt_intensity_{}".format(k,n) for n in range(nTMTs)]
# print(a)
dfResult = pd.DataFrame(out,index=combinedPeakModels.index, columns = quantColumnNames)
extractedQuantFiles.append(dfResult)
combinedPeakModels = combinedPeakModels.join(extractedQuantFiles)
txtOutput = os.path.join(self.params["pathToComb"],"CombinedPeakModelResultsQuant.txt")
combinedPeakModels.to_csv(txtOutput,sep="\t")
def _checkParameterInput(self):
"""
Checks the input.
Parameters
----------
Returns
-------
None
Raises
-------
ValueError if the datatype of a given parameter does not match.
"""
#check analysis mode
validModes = ["label-free","SILAC","SILAC-TMT","TMT"]
if self.params["analysisMode"] not in validModes:
raise ValueError("Parmaeter analysis mode is not valid. Must be one of: {}".format(validModes))
elif self.params["analysisMode"] != "label-free" and len(self.params["quantFiles"]) == 0:
raise ValueError("Length 'quantFiles must be at least 1 if the analysis mode is not set to 'label-free'.")
if not isinstance(self.params["maxPeaksPerSignal"],int):
raise ValueError("maxPeaksPerSignal must be an integer. Current setting: {}".forma(self.params["maxPeaksPerSignal"]))
elif self.params["maxPeaksPerSignal"] <= 2:
raise ValueError("maxPeaksPerSignal must be greater than or equal 2")
elif self.params["maxPeaksPerSignal"] > 20:
print("Warning :: maxPeaksPerSignal is set to above 20, this may take quite long to model.")
#r2 validation
if not isinstance(self.params["r2Thresh"],float):
raise ValueError("Parameter r2Trehsh mus be a floating number.")
elif self.params["r2Thresh"] < 0.5:
print("Warning :: threshold for r2 is set below 0.5. This might result in fits of poor quality")
elif self.params["r2Thresh"] > 0.95:
print("Warning :: threshold for r2 is above 0.95. Relatively few features might pass this limit.")
elif self.params["r2Thresh"] > 0.99:
raise ValueError("Threshold for r2 was above 0.99. Please set a lower value.")
#minPeakHeightOfMax
if not isinstance(self.params["minPeakHeightOfMax"],float) and self.params["minPeakHeightOfMax"] < 1 and self.params["minPeakHeightOfMax"] >= 0:
raise ValueError("Parameter 'minPeakHeightOfMax' must be a float smaller than 1.0 and greather/equal 0.0.")
#k-fold
if not isinstance(self.params["kFold"],int):
raise ValueError("Parameter kFold mus be an integer.")
elif self.params["kFold"] < 2:
raise ValueError("Parameter kFold must be at least 2.")
if self.params["alignMethod"] not in alignModels:
raise ValueError("Parameter alignMethod must be in {}".format(alignModels.values()))
if not isinstance(self.params["metricQuantileCutoff"],float) or self.params["metricQuantileCutoff"] <= 0 or self.params["metricQuantileCutoff"] >= 1:
raise ValueError("Parameter metricQuantileCutoff must be a float greater than 0 and smaller than 1.")
#add database checks
if self.params["metricesForPrediction"] is not None:
if not isinstance(self.params["metricesForPrediction"],list):
raise TypeError("metricesForPrediction must be a list.")
else:
if not all(x in self.params["metrices"] for x in self.params["metricesForPrediction"]):
raise ValueError("All metrices given in 'metricesForPrediction' must be present in 'metrices'.")
else:
self.params["metricesForPrediction"] = self.params["metrices"]
def _chunkPrediction(self,pathToChunk,classifier,nMetrices,probCutoff):
"""
Predicts for each chunk the probability of positive interactions.
Parameters
----------
pathToChunk : str
classifier : classfierClass
Trained classifier.
nMetrices : int
Number of metrices used. (Since chunks are plain numpy arrays, no column headers are loaded.)
probCutoff : float
Probability cutoff.
Returns
-------
Numpy array. Chunks with appended probability.
"""
X = np.load(pathToChunk,allow_pickle=True)
boolSelfIntIdx = X[:,0] != X[:,1]
X = X[boolSelfIntIdx]
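#columns 0-2 hold E1, E2 and the combined E1E2 key, therefore the metric values start at column index 3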
classProba = classifier.predict(X[:,[n+3 for n in range(nMetrices)]])
#boolPredIdx = classProba >= probCutoff
#boolIdx = np.sum(boolPredIdx,axis=1) > 0
predX = np.append(X[:,2].reshape(X.shape[0],1),classProba.reshape(X.shape[0],-1),axis=1)
np.save(
file = pathToChunk,
arr = predX)
return predX
def _load(self, X):
"""
Initiates data.
Parameters
----------
X pd.DataFrame
Returns
-------
None
Raises
-------
ValueError if X is not a pandas data frame.
"""
if isinstance(X, pd.DataFrame):
self.X = X
if not self.params["indexIsID"]:
print("Info :: Checking for duplicates")
dupRemoved = self.X.drop_duplicates(subset=[self.params["idColumn"]])
if dupRemoved.index.size < self.X.index.size:
print("Warning :: Duplicates detected.")
print("File contained duplicate ids which will be removed: {}".format(self.X.index.size-dupRemoved.index.size))
self.X = dupRemoved
self.X = self.X.set_index(self.params["idColumn"])
self.X = self.X.astype(np.float32)
else:
self.X = self.X.loc[self.X.index.drop_duplicates()] #remove duplicates
self.X = self.X.astype(np.float32) #set dtype to 32 to save memory
if self.params["takeRondomSampleFromData"] != False and self.params["takeRondomSampleFromData"] > 50:
self.X = self.X.sample(self.params["takeRondomSampleFromData"])
print("Random samples taken from data. New data size {}".format(self.X.index.size))
self.params["rawData"][self.currentAnalysisName] = self.X.copy()
else:
raise ValueError("X must be a pandas data frame")
def _loadReferenceDB(self):
"""
Load reference database.
filterDB (dict) is passed to the pandas pd.DataFrame.isin function.
Parameters
----------
Returns
-------
None
"""
if self.params["noDistanceCalculationAndPrediction"]:
print("noDistanceCalculationAndPrediction was enabled. No database laoded.")
return
if self.params["noDatabaseForPredictions"]:
print("Info :: Parameter noDatabaseForPredictions was set to True. No database laoded.")
return
print("Info :: Load positive set from data base")
if not hasattr(self,"DB"):
self.DB = Database(nJobs = self.params["n_jobs"], splitString=self.params["databaseEntrySplitString"])
pathToDatabase = os.path.join(self.params["pathToComb"], "InteractionDatabase.txt")
if os.path.exists(pathToDatabase):
dbSize = self.DB.loadDatabaseFromFile(pathToDatabase)
print("Info :: Database found and loaded. Contains {} positive interactions.".format(dbSize))
# self._addMetricToStats("nPositiveInteractions",dbSize)
else:
self.DB.pariwiseProteinInteractions(
self.params["databaseIDColumn"],
dbID = self.params["databaseFileName"],
filterDb=self.params["databaseFilter"])
entryList = []
for analysisName in self.params["analysisName"]:
entryList.extend([entryID for entryID,Signal in self.Signals[analysisName].items() if Signal.valid])
entryList = np.unique(np.array(entryList).flatten())
print("Info :: Features used for filtering: {}".format(len(entryList)))
dbSize = self.DB.filterDBByEntryList(entryList)
#add decoy to db
if dbSize == 0:
raise ValueError("Warning :: No hits found in database. Check dabaseFilter keyword.")
elif dbSize < 150:
raise ValueError("Warining :: Less than 150 pairwise interactions found.")
elif dbSize < 200:
#raise ValueError("Filtered positive database contains less than 200 interactions..")
print("Warning :: Filtered positive database contains less than 200 interactions.. {}".format(dbSize))
print("Warning :: Please check carefully, if the classifier has enough predictive power.")
self.DB.addDecoy(sizeFraction=self.params["decoySizeFactor"])
self.DB.df.to_csv(pathToDatabase,sep="\t")
print("Info :: Database saved to {}".format(pathToDatabase))
def _checkGroups(self):
"Checks grouping. For comparision of multiple co-elution data sets."
if isinstance(self.params["grouping"],dict):
if len(self.params["grouping"]) == 0:
raise ValueError("Example for grouping : {'KO':['KO_01.txt','KO_02.txt'], 'WT':['WT_01.txt','WT_02.txt'] } Aborting.. ")
else:
combinedSamples = sum(self.params["grouping"].values(), [])
if all(x in combinedSamples for x in self.params["analysisName"]):
print("Grouping checked..\nAll columnSuffixes found in grouping.")
print("If you are using the combat format, the grouping has to be named as '<combatFileName><sample name>")
else:
raise ValueError("Could not find all grouping names in loaded dataframe.. Aborting ..")
def _findPeaks(self, n_jobs=3):
"""
Initiates for each feature in the data a Signal instance.
Peak detection and modelling is then performed.
Results are saved to hard drive for each run.
Numerous parameters affect signal modelling (smoothing, maxPeaks, r2Thresh, ...).
Creates self.Signals (OrderedDict). Key = analysisName; each value
is another dict with entry IDs as keys and Signal instances as values.
Parameters
----------
n_jobs int. Number of worker processes.
Returns
-------
None
"""
if self.allSamplesFound:
print("Info :: Signals loaded and found. Proceeding ...")
return
pathToSignal = os.path.join(self.params["pathToComb"],"signals.lzma")
if os.path.exists(pathToSignal):
self.Signals = load(pathToSignal)
print("\nLoading pickled signal intensity")
if all(analysisName in self.Signals for analysisName in self.params["analysisName"]):
print("Info :: All samples found in loaded Signals..")
self.allSamplesFound = True
return
if not hasattr(self , "Signals"):
self.Signals = OrderedDict()
self.Signals[self.currentAnalysisName] = dict()
peakModel = self.params['peakModel']
for entryID, signal in self.X.iterrows():
self.Signals[self.currentAnalysisName][entryID] = Signal(signal.values,
ID = entryID,
peakModel = peakModel,
smoothSignal = self.params["smoothSignal"],
savePlots = self.params["plotSignalProfiles"],
savePeakModels = self.params["savePeakModels"],
maxPeaks = self.params["maxPeaksPerSignal"],
metrices = self.params["metrices"],
pathToTmp = self.params["pathToTmp"][self.currentAnalysisName],
normalizationValue = self.params["normValueDict"][entryID] if entryID in self.params["normValueDict"] else None,
removeSingleDataPointPeaks = self.params["removeSingleDataPointPeaks"],
analysisName = self.currentAnalysisName,
r2Thresh = self.params["r2Thresh"],
smoothRollingWindow = self.params["smoothWindow"],
minDistanceBetweenTwoPeaks = self.params["minDistanceBetweenTwoPeaks"],
minPeakHeightOfMax = self.params["minPeakHeightOfMax"])
t1 = time.time()
print("\n\nStarting Signal modelling .. (n_jobs = {})".format(n_jobs))
fittedModels = Parallel(n_jobs=n_jobs, verbose=1)(delayed(Signal.fitModel)() for Signal in self.Signals[self.currentAnalysisName].values())
self._addModelToSignals(fittedModels)
self._saveSignalFitStatistics()
print("Peak fitting done time : {} secs".format(round((time.time()-t1))))
print("Each feature's fitted models is stored as pdf and txt is stored in model plots (if savePeakModels and plotSignalProfiles was set to true)")
def _saveSignals(self):
""
if hasattr(self,"Signals") :
pathToSignal = os.path.join(self.params["pathToComb"],"signals.lzma")
dump(self.Signals.copy(),pathToSignal)
self.Xs = OrderedDict()
for analysisName in self.params["analysisName"]:
pathToFile = os.path.join(self.params["pathToTmp"][analysisName],"validProcessedSignals({}).txt".format(analysisName))
signals = self.Signals[analysisName]
validSignalData = dict([(k,v.Y) for k,v in signals.items() if v.valid and v.validModel])
fitDataSignal = dict([(k,v.fitSignal.flatten()) for k,v in signals.items() if v.valid and v.validModel and v.fitSignal is not None])
dfProcessedSignal = pd.DataFrame().from_dict(validSignalData,orient="index")
dfFit = pd.DataFrame().from_dict(fitDataSignal, orient="index")
if self.params["removeSingleDataPointPeaks"]:
numberofPeaks = dict([(k,v.removedDataPoints) for k,v in signals.items() if v.valid and v.validModel and v.fitSignal is not None])
nRemovedData = pd.DataFrame().from_dict(numberofPeaks,orient="index")
nRemovedData.columns = ["#removedDataPoints"]
dfFit = dfFit.join(nRemovedData)
#print(self.params["rawData"][analysisName].index)
df = dfProcessedSignal.join(self.params["rawData"][analysisName],rsuffix="_raw",lsuffix="_processed")
df = df.join(dfFit,rsuffix = "_fit")
df.to_csv(pathToFile,sep="\t")
self.Xs[analysisName] = dfProcessedSignal
X = self.Xs[analysisName].reset_index()
np.save(os.path.join(self.params["pathToTmp"][analysisName],"source.npy"),X.values)
for analysisName in self.params["analysisName"]:
#clean invalid signals
if self.params["keepOnlySignalsValidInAllConditions"]:
toDelete = [k for k,v in self.Signals[analysisName].items() if not all(k in self.Signals[analysisName] and self.Signals[analysisName][k].valid for analysisName in self.params["analysisName"])]
else:
toDelete = [k for k,v in self.Signals[analysisName].items() if not v.valid]
#delete Signals that do not match the criteria
for k in toDelete:
del self.Signals[analysisName][k]
def _calculateDistance(self):
"""
Calculates Distance between protein protein pairs based
on their signal profile.
Parameters
----------
Returns
-------
None
"""
if self.params["noDistanceCalculationAndPrediction"]:
print("noDistanceCalculationAndPrediction was enabled. Skipping Distance Calculations.")
return
global entriesInChunks
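#entriesInChunks maps, per analysis, each E1E2 pair to the chunk file holding its distances; it is pickled (entriesInChunk.pkl) so later steps can look up pairs without recalculation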
print("\nStarting Distance Calculation ...")
t1 = time.time()
chunks = self.signalChunks[self.currentAnalysisName]
#return
entrieChunkPath = os.path.join(self.params["pathToComb"], "entriesInChunk.pkl")
if not self.params["recalculateDistance"] and all(os.path.exists(x.replace(".pkl",".npy")) for x in chunks) and os.path.exists(entrieChunkPath):
print("All chunks found for distance calculation.")
if not self.entriesInChunkLoaded:
with open(os.path.join(self.params["pathToComb"], "entriesInChunk.pkl"),"rb") as f:
entriesInChunks = pickle.load(f)
self.entriesInChunkLoaded = True
else:
chunkItems = Parallel(n_jobs=self.params["n_jobs"], verbose=10)(delayed(calculateDistanceP)(c) for c in chunks)
entriesInChunks[self.currentAnalysisName] = {}
for k,v in chunkItems:
for E1E2 in v:
entriesInChunks[self.currentAnalysisName][E1E2] = k
with open(os.path.join(self.params["pathToComb"], "entriesInChunk.pkl"),"wb") as f:
pickle.dump(entriesInChunks,f)
print("Distance computing/checking: {} secs\n".format(round(time.time()-t1)))
def _createSignalChunks(self,chunkSize = 30):
"""
Creates signal chunks at given chunk size.
Parameter
---------
chunkSize - int. default 30. Number of signals in a single chunk.
Returns
-------
list of paths to the saved chunks.
"""
pathToSignalChunk = os.path.join(self.params["pathToComb"],"signalChunkNames.lzma")
if os.path.exists(pathToSignalChunk) and not self.params["recalculateDistance"]:
self.signalChunks = load(pathToSignalChunk)
print("Info :: Signal chunks loaded and found. Checking if all runs are present.")
if all(analysisName in self.signalChunks for analysisName in self.params["analysisName"]):
print("Info :: Checked... all samples found.")
return
else:
print("Info :: Not all samples found. Creating new signal chunks..")
if not hasattr(self,"signalChunks"):
self.signalChunks = dict()
else:
self.signalChunks.clear()
for analysisName in self.params["analysisName"]:
print("Info :: {} signal chunk creation started.\nThis may take some minutes.." .format(analysisName))
if "umap-dist" in self.params["metrices"]:
#umap dist calculations
print("Info :: Calculation UMAP.")
embed = umap.UMAP(min_dist=0.0000000000001, n_neighbors=5, metric = "correlation", random_state=56).fit_transform(minMaxNorm(self.Xs[analysisName].values,axis=1))
embed = pd.DataFrame(embed,index=self.Xs[analysisName].index)
#save embedding
embed.to_csv(os.path.join(self.params["pathToTmp"][analysisName],"chunks","embeddings.txt"),sep="\t")
signals = list(self.Signals[analysisName].values())
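#each signal keeps a reference to itself and all later signals (signals[n:]) so that every unordered pair is evaluated exactly once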
for n,Signal in enumerate(self.Signals[analysisName].values()):
setattr(Signal,"otherSignals", signals[n:])
c = []
for n,chunk in enumerate(chunks(signals,chunkSize)):
pathToChunk = os.path.join(self.params["pathToTmp"][analysisName],"chunks",str(n)+".pkl")
#if not os.path.exists(pathToChunk) and not self.params["recalculateDistance"]:
chunkItems = [
{
"ID" : str(signal.ID),
"chunkName" : str(n),
"Y" : np.array(signal.Y),
"ownPeaks" : signal.getPeaksAndsIDs(),
"otherSignalPeaks" : [s.getPeaksAndsIDs() for s in signal.otherSignals],
"E2" : [str(s.ID) for s in signal.otherSignals],
"metrices" : self.params["metrices"],
"pathToTmp" : self.params["pathToTmp"][analysisName],
"correlationWindowSize" : self.params["correlationWindowSize"],
"embedding" : embed.loc[signal.ID].values if "umap-dist" in self.params["metrices"] else [],
} for signal in chunk]
with open(pathToChunk,"wb") as f:
pickle.dump(chunkItems,f)
c.append(pathToChunk)
self.signalChunks[analysisName] = [p for p in c if os.path.exists(p)] #
#save signal chunk paths.
dump(self.signalChunks,pathToSignalChunk)
def _collectRSquaredAndFitDetails(self):
"""
Data are collected from txt files in the modelPlots folder.
"""
if not self.params["savePeakModels"]:
print("!! Warning !! This parameter is depracted and from now on always true.")
self.params["savePeakModels"] = True
pathToPlotFolder = os.path.join(self.params["pathToTmp"][self.currentAnalysisName],"result","modelPlots")
resultFolder = os.path.join(self.params["pathToTmp"][self.currentAnalysisName],"result")
fittedPeaksPath = os.path.join(resultFolder,"fittedPeaks_{}.txt".format(self.currentAnalysisName))
nPeaksPath = os.path.join(resultFolder,"nPeaks.txt")
if os.path.exists(fittedPeaksPath) and os.path.exists(nPeaksPath):
print("Warning :: FittedPeaks detected. If you changed the data, you have to set the paramter 'restartAnalysis' True to include changes..")
return
if not os.path.exists(resultFolder):
os.mkdir(resultFolder)
#number of peaks
collectNumbPeaks = []
data = [{"Key":signal.ID,
"ID" : n,
"R2":signal.Rsquared,
"#Peaks":len(signal.modelledPeaks),
"Center":peakParam["mu"],
"Amplitude":peakParam["A"],
"Sigma":peakParam["sigma"],
"fwhm":peakParam["fwhm"],
"height" : peakParam["height"],
"AUC" : peakParam["AUC"],
"relAUC" : peakParam["relAUC"],
"validModel":signal.validModel,
"validData":signal.validData,
"Y": ",".join([str(round(x,3)) for x in peakParam["Y"]])} for signal in self.Signals[self.currentAnalysisName].values() if signal.valid for n,peakParam in enumerate(signal.modelledPeaks)]
df = pd.DataFrame().from_dict(data)
df.to_csv(fittedPeaksPath,sep="\t",index=None)
# # find peak properties..
# df = pd.DataFrame(columns=["Key","ID","Amplitude","Center","Sigma","fwhm","height","auc"])
# for file in os.listdir(pathToPlotFolder):
# if file.endswith(".txt"):
# try:
# dfApp = pd.read_csv(os.path.join(pathToPlotFolder,file), sep="\t")
# df = df.append(dfApp)
# collectNumbPeaks.append({"Key":dfApp["Key"].iloc[0],"N":len(dfApp.index)})
# except:
# continue
#pd.DataFrame(collectNumbPeaks).to_csv(nPeaksPath,sep="\t", index = None)
def _trainPredictor(self, addImpurity = 0.3, apexTraining = False):
"""
Trains the predictor based on positive interactions
in the database.
Parameters
----------
Returns
-------
None
"""
#metricColumns = [col for col in self.DB.df.columns if any(x in col for x in self.params["metrices"])]
if self.params["noDatabaseForPredictions"] or self.params["noDistanceCalculationAndPrediction"]:
print("Predictor training skipped (noDatabaseForPredictions = True or noDistanceCalculationAndPrediction = True). Distance metrices/Raw signals are used for dimensional reduction.")
return
folderToResults = [os.path.join(self.params["pathToTmp"][analysisName],"result") for analysisName in self.params["analysisName"]]
classifierFileName = os.path.join(self.params["pathToComb"],'trainedClassifier_{}.sav'.format(self.params["classifierClass"]))
if not self.params["retrainClassifier"] and os.path.exists(classifierFileName): #enumerate(
print("Info :: Prediction was done already... loading file")
self.classifier = joblib.load(classifierFileName)
return
metricColumnsForPrediction = self.params["metrices"]
totalColumns = metricColumnsForPrediction + ['Class',"E1E2"]
data = [self.DB.dfMetrices[analysisName][totalColumns].dropna(subset=metricColumnsForPrediction) for analysisName in self.params["analysisName"]]
data = pd.concat(data, ignore_index=True)
dataForTraining = data[["E1E2","Class"] + metricColumnsForPrediction]
dataForTraining["Class"] = dataForTraining["Class"].astype(np.float64)
print("Info :: Merging database metrices.")
print("Test size for classifier: {}".format(self.params["classifierTestSize"]))
if apexTraining and "apex" in totalColumns:
print("Info :: Performing apex based pooling.")
dataForTraining = dataForTraining.sort_values("apex").drop_duplicates("E1E2")
else:
dataForTraining = dataForTraining.groupby(dataForTraining['E1E2']).aggregate("min")
dataForTraining['Class'] = dataForTraining['Class'].astype(np.int64)
dataForTraining = dataForTraining.reset_index()
print("Info :: Using a total of {} features for classifier training.".format(dataForTraining.index.size))
if addImpurity > 0:
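#deliberately flip the class label (0 <-> 1 via XOR) of a random subset of training pairs; intended for robustness testing only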
nRows = dataForTraining.index.size
rowIdx = np.random.choice(nRows,int(nRows * addImpurity),replace=False)#np.random.randint(0,nRows,size=int(nRows * addImpurity))
print(dataForTraining.loc[rowIdx,'Class'] ^ 1)
dataForTraining.loc[rowIdx,'Class'] = dataForTraining.loc[rowIdx,'Class'] ^ 1
print("Warning :: Stop! Using impurity for the training data is not advisable other than for testing. You should probably not do this?")
Y = dataForTraining['Class'].values
X = dataForTraining.loc[:,metricColumnsForPrediction].values
self.classifier = Classifier(
classifierClass = self.params["classifierClass"],
n_jobs=self.params['n_jobs'],
gridSearch = self.params["classiferGridSearch"],
testSize = self.params["classifierTestSize"])
probabilites, meanAuc, stdAuc, oobScore, optParams, Y_test, Y_pred = self.classifier.fit(X,Y,kFold=self.params["kFold"],pathToResults=self.params["pathToComb"], metricColumns = metricColumnsForPrediction)
dataForTraining["PredictionClass"] = probabilites
#save prediction summary
pathToFImport = os.path.join(self.params["pathToComb"],"PredictorSummary{}_{}.txt".format(self.params["metrices"],self.params["addImpurity"]))
#create and save classification report
classReport = classification_report(
Y_test,
Y_pred,
digits=3,
output_dict=True)
classReport = OrderedDict([(k,v) for k,v in classReport.items() if k != 'accuracy'])
pd.DataFrame().from_dict(classReport, orient="index").to_csv(pathToFImport, sep="\t", index=True)
#save database prediction
dataForTraining.to_csv(os.path.join(self.params["pathToComb"],"DBpred({}).txt".format(self.params["addImpurity"])),sep="\t", index=False)
self._plotFeatureImportance(self.params["pathToComb"])
joblib.dump(self.classifier, classifierFileName)
self._addMetricToStats("Metrices",str(metricColumnsForPrediction))
self._addMetricToStats("OOB_Score",oobScore)
self._addMetricToStats("ROC_Curve_AUC","{}+-{}".format(meanAuc,stdAuc))
self._addMetricToStats("ClassifierParams",optParams)
print("DB prediction saved - DBpred.txt :: Classifier pickled and saved 'trainedClassifier.sav'")
def _loadPairsForPrediction(self):
""
#load chunks that were saved
chunks = [f for f in os.listdir(os.path.join(self.params["pathToTmp"][self.currentAnalysisName],"chunks")) if f.endswith(".npy") and f != "source.npy"]
print("\nInfo :: Prediction/Dimensional reduction started...")
for chunk in chunks:
X = np.load(os.path.join(self.params["pathToTmp"][self.currentAnalysisName],"chunks",chunk),allow_pickle=True)
yield (X,len(chunks))
def _predictInteractions(self):
""
if self.params["noDatabaseForPredictions"] or self.params["noDistanceCalculationAndPrediction"]:
print("Info :: Skipping predictions. (noDatabaseForPredictions = True or noDistanceCalculationAndPrediction = True)")
return
paramDict = {"NumberInteractions" : 0, "positiveInteractors" : 0, "decoyInteractors" : 0, "novelInteractions" : 0, "interComplexInteractions" : 0}
probCutoffs = dict([(cutoff,paramDict.copy()) for cutoff in np.linspace(0.0,0.99,num=30)])
print("Info :: Starting prediction ..")
folderToOutput = os.path.join(self.params["pathToTmp"][self.currentAnalysisName],"result")
pathToPrediction = os.path.join(folderToOutput,"predictedInteractions{}_{}_{}.txt".format(self.params["metricesForPrediction"],self.params["classifierClass"],self.params["addImpurity"]))
if False and not self.params["retrainClassifier"] and os.path.exists(pathToPrediction):
predInts = pd.read_csv(pathToPrediction, sep="\t")
self.stats.loc[self.currentAnalysisName,"nInteractions ({})".format(self.params["interactionProbabCutoff"])] = predInts.index.size
return predInts
# del self.Signals
#gc.collect()
#create prob columns of k fold
pColumns = ["Prob_{}".format(n) for n in range(len(self.classifier.predictors))]
dfColumns = ["E1","E2","E1E2","apexPeakDist"] + [x if not isinstance(x,dict) else x["name"] for x in self.params["metrices"]] + pColumns + ["In DB"]
if not os.path.exists(folderToOutput):
os.mkdir(folderToOutput)
predInteractions = None
metricIdx = [n + 4 if "apex" in self.params["metrices"] else n + 3 for n in range(len(self.params["metrices"]))] #in order to extract from dinstances, apex creates an extra column (apex_dist)
for n,(X,nChunks) in enumerate(self._loadPairsForPrediction()):
boolSelfIntIdx = X[:,0] == X[:,1]
if n % 5 == 0:
percDone = round(n/nChunks*100,1)
print(percDone,r"%")
X = X[boolSelfIntIdx == False]
#first two rows E1 E2, and E1E2, apexPeakDist remove before predict
if X.shape[0] > 0:
classProba = self.classifier.predict(X[:,metricIdx])
else:
continue
if classProba is None:
continue
predX = np.append(X,classProba.reshape(X.shape[0],-1),axis=1)
interactionClass = self.DB.getInteractionClassByE1E2(X[:,2],X[:,0],X[:,1])
for cutoff in probCutoffs.keys():
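#an interaction counts as predicted at a given cutoff only if all k-fold classifiers report a probability >= cutoff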
boolPredIdx = classProba >= cutoff
if len(boolPredIdx.shape) > 1:
boolIdx = np.sum(boolPredIdx,axis=1) == self.params["kFold"]
else:
boolIdx = boolPredIdx
counts = interactionClass.loc[boolIdx].value_counts()
nInteractions = np.sum(boolIdx)
probCutoffs[cutoff]["NumberInteractions"] += nInteractions
probCutoffs[cutoff]["positiveInteractors"] += counts["pos"] if "pos" in counts.index else 0
probCutoffs[cutoff]["decoyInteractors"] += counts["decoy"] if "decoy" in counts.index else 0
probCutoffs[cutoff]["novelInteractions"] += counts["unknown/novel"] if "unknown/novel" in counts.index else 0
probCutoffs[cutoff]["interComplexInteractions"] += counts["inter"] if "inter" in counts.index else 0
boolPredIdx = classProba >= self.params["interactionProbabCutoff"]
if len(boolPredIdx.shape) > 1:
boolIdx = np.sum(boolPredIdx,axis=1) == self.params["kFold"]
else:
boolIdx = boolPredIdx
predX = np.append(predX,interactionClass.values.reshape(predX.shape[0],1),axis=1)
if predInteractions is None:
predInteractions = predX[boolIdx,:]
else:
predInteractions = np.append(predInteractions,predX[boolIdx,:], axis=0)
probData = pd.DataFrame().from_dict(probCutoffs, orient="index")
probData["FalseNegatives"] = probData["positiveInteractors"].iloc[0] - probData["positiveInteractors"]
probData["precision"] = (probData["positiveInteractors"]) / (probData["positiveInteractors"] + probData["interComplexInteractions"] + probData["decoyInteractors"])
probData["recall"] = (probData["positiveInteractors"]) / (probData["positiveInteractors"] + probData["FalseNegatives"])
probData["F1-measure"] = 2 * ((probData["precision"] * probData["recall"]) / (probData["precision"] + probData["recall"]))
probData["F-measure(b=2)"] = (1+2**2) * ((probData["precision"] * probData["recall"]) / (((2**2) * probData["precision"]) + probData["recall"]))
probData["F-measure(b=0.5)"] = (1+0.5**2)* ((probData["precision"] * probData["recall"]) / (((0.5**2) * probData["precision"]) + probData["recall"]))
#self.params["interactionProbabCutoff"] = float(probData.idxmax().loc["F1-measure"])
print("Info :: Interaction probability was set to: {} based on the F-metric using beta = 1.".format(self.params["interactionProbabCutoff"] ))
# boolPredIdx = classProba >= self.params["interactionProbabCutoff"]
# if len(boolPredIdx.shape) > 1:
# boolIdx = np.sum(boolPredIdx,axis=1) == self.params["kFold"]
# else:
# boolIdx = boolPredIdx
probData.to_csv(os.path.join(folderToOutput,"classiferPerformanceMetrics_{}_addImp{}.txt".format(self.params["classifierClass"],self.params["addImpurity"])),sep="\t")
# print("Interactions > cutoff :", predInteractions.shape[0])
# print("Info :: Finding interactions in DB")
# boolDbMatch = np.isin(predInteractions[:,2],self.DB.df["E1E2"].values, assume_unique=True)
# print("Info :: Appending matches.")
# predInteractions = np.append(predInteractions,boolDbMatch.reshape(predInteractions.shape[0],1),axis=1)
d = pd.DataFrame(predInteractions, columns = dfColumns)
print("Info :: Number of interactions detected: {} at cut-off {}".format(d.index.size,self.params["interactionProbabCutoff"]))
boolDbMatch = d["In DB"] == "pos"
print("Info :: Annotate complexes to pred. interactions.")
d["ComplexID"], d["ComplexName"] = zip(*[self._attachComplexID(_bool,E1E2) for E1E2, _bool in zip(predInteractions[:,2], boolDbMatch)])
d = self._attachPeakIDtoEntries(d)
# boolIdx = d[pColumns[0]] > self.params["interactionProbabCutoff"]
# d = d.loc[boolIdx]
origSize = d.index.size
print("Info : Filter for at least {} times in predicted interactions".format(self.params["minimumPPsPerFeature"]))
if self.params["usePeakCentricFeatures"]:
eColumns = ["E1p","E2p"]
else:
eColumns = ["E1","E2"]
Es = pd.Series(d[eColumns].values.flatten())
EsCounted = Es.value_counts()
boolIdx = EsCounted >= self.params["minimumPPsPerFeature"]
duplicatedPPs = EsCounted.index[boolIdx]
d = d.loc[d[eColumns].isin(duplicatedPPs).all(axis="columns")]
print("Removed interactions {}".format(origSize-d.index.size))
d.to_csv(pathToPrediction, sep="\t", index=False)
self.stats.loc[self.currentAnalysisName,"nInteractions ({})".format(self.params["interactionProbabCutoff"])] = d.index.size
self.stats.loc[self.currentAnalysisName,"Classifier"] = self.params["classifierClass"]
return d
def _attachComplexID(self,_bool,E1E2):
""
if not _bool:
return ("","")
else:
df = self.DB.df[self.DB.df["E1E2"] == E1E2]
return (';'.join([str(x) for x in df["ComplexID"].tolist()]),
';'.join([str(x) for x in df["complexName"].tolist()]))
def _plotChunkSummary(self, data, fileName, folderToOutput):
"util fn"
data[self.params["metrices"]] = self.classifier._scaleFeatures(data[self.params["metrices"]].values)
fig, ax = plt.subplots()
XX = data.melt(id_vars = [x for x in data.columns if x not in self.params["metrices"]],value_vars=self.params["metrices"])
sns.boxplot(data = XX, ax=ax, y = "value", x = "variable", hue = "Class")
plt.savefig(os.path.join(folderToOutput,"{}.pdf".format(fileName)))
plt.close()
def _plotFeatureImportance(self,folderToOutput,*args,**kwargs):
"""
Creates a bar chart showing the estimated feature importances
Parameters
----------
folderToOutput : string
Path to folder to save the pdf. Will be created if it does not exist.
*args
Variable length argument list passed to matplotlib.bar.
**kwargs
Arbitrary keyword arguments passed to matplotlib.bar.
Returns
-------
None
"""
fImp = self.classifier.getFeatureImportance()
self._makeFolder(folderToOutput)
if fImp is not None:
#save as txt file
pd.DataFrame(fImp, columns= self.params["metrices"]).to_csv(os.path.join(folderToOutput,"featureImportance{}.txt".format(self.params["metrices"])), sep="\t")
#plot feature importance
fig, ax = plt.subplots()
xPos = np.arange(len(self.params["metrices"]))
ax.bar(x = xPos, height = np.mean(fImp,axis=0), *args,**kwargs)
ax.errorbar(x = xPos, y = np.mean(fImp,axis=0), yerr = np.std(fImp,axis=0))
ax.set_xticks(xPos)
ax.set_xticklabels(self.params["metrices"], rotation = 45)
plt.savefig(os.path.join(folderToOutput,"featureImportance.pdf"))
plt.close()
def _randomStr(self,n):
"""
Returns a random string (lower and upper case) of size n
Parameters
----------
n : int
Length of string
Returns
-------
random string of length n
"""
letters = string.ascii_lowercase + string.ascii_uppercase
return "".join(random.choice(letters) for i in range(n))
def _scoreComplexes(self, complexDf, complexMemberIds = "subunits(UniProt IDs)", beta=2.5):
""
entryPositiveComplex = [self.DB.assignComplexToProtein(str(e),complexMemberIds,"ComplexID") for e in complexDf.index]
complexDf.loc[:,"ComplexID"] = entryPositiveComplex
matchingResults = pd.DataFrame(columns = ["Entry","Cluster Labels","Complex ID", "NumberOfInteractionsInDB"])
clearedEntries = pd.Series([x.split("_")[0] for x in complexDf.index], index=complexDf.index)
for c,d in self.DB.indentifiedComplexes.items():
boolMatch = clearedEntries.isin(d["members"])
clusters = complexDf.loc[boolMatch,"Cluster Labels"].values.flatten()
nEntriesMatch = np.sum(boolMatch)
if nEntriesMatch > 1:
groundTruth = [c] * nEntriesMatch
matchingResults = matchingResults.append(pd.DataFrame().from_dict({"Entry":complexDf.index[boolMatch].values,
"Cluster Labels" : clusters,
"Complex ID": groundTruth,
"NumberOfInteractionsInDB" : [d["n"]] * nEntriesMatch}) ,ignore_index=True)
if not matchingResults.empty:
score = v_measure_score(matchingResults["Complex ID"],matchingResults["Cluster Labels"],beta = beta)
else:
score = np.nan
return complexDf , score, matchingResults
def _clusterInteractions(self, predInts, clusterMethod = "HDBSCAN", plotEmbedding = True, groupFiles = [], combineProbs = True, groupName = ""):
"""
Performs dimensional reduction and clustering of prediction distance matrix over a defined parameter grid.
Parameter
predInts - ndarray.
clusterMethod - string. Any string of ["HDBSCAN",]
plotEmbedding - bool. If True, the embedding is plotted and saved to pdf and txt files.
returns
None
"""
embedd = None
bestDf = None
topCorrFeatures = None
splitLabels = False
recordScore = OrderedDict()
saveEmbeddings = []
maxScore = np.inf
metricColumns = [x if not isinstance(x,dict) else x["name"] for x in self.params["metricesForPrediction"]]
cb = ComplexBuilder(method=clusterMethod)
print("\nPredict complexes")
if predInts is None:
print("No database provided. UMAP and clustering will be performed using defaultKwargs. (noDatabaseForPredictions = True)")
pathToFolder = self._makeFolder(self.params["pathToComb"],"complexIdentification_{}".format(self.params["addImpurity"]))
if not self.params["databaseHasComplexAnnotations"] and not self.params["noDatabaseForPredictions"] and predInts is not None:
print("Database does not contain complex annotations. Therefore standard UMAP settings are HDBSCAN settings are used for complex identification.")
cb.set_params(self.params["hdbscanDefaultKwargs"])
clusterLabels, intLabels, matrix , reachability, core_distances, embedd = cb.fit(predInts,
metricColumns = metricColumns,
scaler = self.classifier._scaleFeatures,
umapKwargs= self.params["umapDefaultKwargs"])
elif self.params["noDistanceCalculationAndPrediction"] or self.params["noDatabaseForPredictions"]:
print("Info :: No database given for complex scoring. UMAP and HDBSCAN are performed to identify complexes.")
alignedEmbeddings = OrderedDict()
if len(self.Xs) > 1:
#correlate with each other
firstKey = list(self.Xs.keys())[0]
corrDfs = [self.Xs[firstKey].corrwith(df,axis=1,drop=True) for k,df in self.Xs.items() if k != firstKey]
mergedDf = pd.concat(corrDfs,join="inner",axis=1).mean(axis=1).sort_values(ascending=False)
topCorrFeatures = mergedDf.head(self.params["topNCorrFeaturesForUMAPAlignment"]).index
dataSets = [minMaxNorm(X.values,axis=1) for X in self.Xs.values()]
relations = []
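#relations maps, for each pair of consecutive data sets, the row position of a top-correlated feature in the previous run to its position in the current run (the format expected by umap.AlignedUMAP)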
for k,v in self.Xs.items():
if k != firstKey:
relationDict = dict([(self.Xs[prevKey].index.get_loc(idx),v.index.get_loc(idx)) for idx in topCorrFeatures])
relations.append(relationDict)
prevKey = k
print("Info :: Computing aligned UMAP using top correlated features.")
aligned_mapper = umap.aligned_umap.AlignedUMAP(**self.params["umapDefaultKwargs"]).fit(dataSets, relations=relations)
for n,umapE in enumerate(aligned_mapper.embeddings_):
key = list(self.Xs.keys())[n]
df = pd.DataFrame(umapE, index=self.Xs[key].index, columns = ["E({})_0".format(key),"E({})_1".format(key)])
alignedEmbeddings[key] = df.copy()
for analysisName in self.params["analysisName"]:
if self.params["useRawDataForDimensionalReduction"]:
print("Info :: Using raw intensity data for dimensional reduction. Not calculated distances")
if self.params["scaleRawDataBeforeDimensionalReduction"]:
X = self.Xs[analysisName]
predInts = pd.DataFrame(minMaxNorm(X.values,axis=1), index=X.index, columns = ["scaled_({})_{}".format(analysisName,colName) for colName in X.columns]).dropna()
else:
predInts = self.Xs[analysisName]
cb.set_params(self.params["hdbscanDefaultKwargs"])
clusterLabels, intLabels, matrix , reachability, core_distances, embedd, pooledDistances = cb.fit(predInts,
preCompEmbedding = alignedEmbeddings[analysisName] if analysisName in alignedEmbeddings else None,
metricColumns = self.X.columns,
scaler = None,
umapKwargs = self.params["umapDefaultKwargs"],
generateSquareMatrix = False,
)
df = pd.DataFrame().from_dict({"Entry":intLabels,"Cluster Labels":clusterLabels,"reachability":reachability,"core_distances":core_distances})
df = df.sort_values(by="Cluster Labels")
df = df.set_index("Entry")
predInts.to_csv(os.path.join(pathToFolder,"predInts_{}.txt".format(analysisName)))
else:
predInts = self._loadAndFilterDistanceMatrix()
predInts[metricColumns] = minMaxNorm(predInts[metricColumns].values,axis=0)
cb.set_params(self.params["hdbscanDefaultKwargs"])
clusterLabels, intLabels, matrix , reachability, core_distances, embedd, pooledDistances = cb.fit(predInts,
metricColumns = metricColumns,
scaler = None,
poolMethod= "min",
umapKwargs = self.params["umapDefaultKwargs"],
generateSquareMatrix = True,
)
df = pd.DataFrame().from_dict({"Entry":intLabels,"Cluster Labels({})".format(analysisName):clusterLabels,"reachability":reachability,"core_distances":core_distances})
df = df.sort_values(by="Cluster Labels({})".format(analysisName))
df = df.set_index("Entry")
if pooledDistances is not None:
pooledDistances.to_csv(os.path.join(pathToFolder,"PooledDistance_{}.txt".format(self.currentAnalysisName)),sep="\t")
squaredDf = pd.DataFrame(matrix,columns=intLabels,index=intLabels).loc[df.index,df.index]
squaredDf.to_csv(os.path.join(pathToFolder,"SquaredSorted_{}.txt".format(self.currentAnalysisName)),sep="\t")
noNoiseIndex = df.index[df["Cluster Labels({})".format(analysisName)] > 0]
squaredDf.loc[noNoiseIndex,noNoiseIndex].to_csv(os.path.join(pathToFolder,"NoNoiseSquaredSorted_{}.txt".format(self.currentAnalysisName)),sep="\t")
splitLabels = True
if embedd is not None and plotEmbedding:
#save embedding
dfEmbed = pd.DataFrame(embedd, columns = ["UMAP_{}_0{}".format(analysisName,n) for n in range(embedd.shape[1])])
dfEmbed["clusterLabels({})".format(analysisName)] = clusterLabels
dfEmbed["labels({})".format(analysisName)] = intLabels
if splitLabels:
dfEmbed["sLabels"] = dfEmbed["labels"].str.split("_",expand=True).values[:,0]
dfEmbed = dfEmbed.set_index("sLabels")
else:
dfEmbed = dfEmbed.set_index("labels({})".format(analysisName))
if self.params["scaleRawDataBeforeDimensionalReduction"] and self.params["useRawDataForDimensionalReduction"]:
dfEmbed = dfEmbed.join([self.Xs[self.currentAnalysisName],predInts],lsuffix="_",rsuffix="__")
else:
dfEmbed = dfEmbed.join(self.Xs[self.currentAnalysisName])
if topCorrFeatures is not None:
dfEmbed["FeatureForUMAPAlign"] = dfEmbed.index.isin(topCorrFeatures)
saveEmbeddings.append(dfEmbed)
dfEmbed.to_csv(os.path.join(pathToFolder,"UMAP_Embedding_{}.txt".format(analysisName)),sep="\t")
#plot embedding.
fig, ax = plt.subplots()
ax.scatter(embedd[:,0],embedd[:,1],s=12, c=clusterLabels, cmap='Spectral')
plt.savefig(os.path.join(pathToFolder,"E({}).pdf".format(analysisName)))
plt.close()
pd.concat(saveEmbeddings,axis=1).to_csv(os.path.join(pathToFolder,"concatEmbeddings.txt"),sep="\t")
else:
embedd = None
if len(groupFiles) > 0:
groupMetricColumns = ["Prob_0_({})".format(analysisName) for analysisName in groupFiles]
# print(groupMetricColumns)
usePeaks = self.params["usePeakCentricFeatures"]
print("Using peaks for clustering.")
print(groupMetricColumns)
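# peak-centric features cluster protein+peak identifiers (E1p/E2p of the
# first group file); otherwise the plain protein identifiers E1/E2 are used
# as cluster entries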
if usePeaks:
# if len(groupFiles) > 0:
eColumns = ["E1p_({})".format(groupFiles[0]),"E2p_({})".format(groupFiles[0])]
predInts = predInts[groupMetricColumns + eColumns + ["E1E2"]]
else:
predInts = predInts[groupMetricColumns + ["E1","E2","E1E2"]]
eColumns = ["E1","E2"]
#
predInts.dropna(subset=groupMetricColumns,inplace=True,thresh=1)
for n, params in enumerate(list(ParameterGrid(CLUSTER_PARAMS[clusterMethod]))):
try:
cb.set_params(params)
if clusterMethod == "HDBSCAN":
clusterLabels, intLabels, matrix , reachability, core_distances, embedd, pooledDistances = cb.fit(predInts,
metricColumns = groupMetricColumns,#,#[colName for colName in predInts.columns if "Prob_" in colName],
scaler = None,#self.classifier._scaleFeatures, #
inv = True, # after pooling by poolMethod, invert (1-X)
poolMethod="max",
preCompEmbedding = None,
entryColumns = eColumns
)
else:
clusterLabels, intLabels, matrix , reachability, core_distances = cb.fit(predInts,
metricColumns = [colName for colName in predInts.columns if "Prob_" in colName],
scaler = self.classifier._scaleFeatures)
# clusterLabels, intLabels, matrix , reachability, core_distances = cb.fit(predInts, metricColumns = probColumn, scaler = None, inv=True, poolMethod="mean")
except Exception as e:
print(e)
print("\nWarning :: There was an error performing clustering and dimensional reduction, using the params:\n" + str(params))
continue
df = pd.DataFrame().from_dict({"Entry":intLabels,"Cluster Labels":clusterLabels,"reachability":reachability,"core_distances":core_distances})
df = df.sort_values(by=["Cluster Labels"])
if usePeaks:
df["E"] = df["Entry"].str.split("_",expand=True)[0]
df = df.set_index("E")
else:
df = df.set_index("Entry")
# clusteredComplexes = df[df["Cluster Labels"] != -1]
df, score, matchingResults = self._scoreComplexes(df)
# df = df.join(assignedIDs[["ComplexID"]])
if True:#maxScore > score: # write out all
df.to_csv(os.path.join( pathToFolder,"Complexes:{}_{}_{}.txt".format(groupName,n,score)),sep="\t")
matchingResults.to_csv(os.path.join( pathToFolder,"ComplexPerEntry(ScoreCalc):{}_{}_{}.txt".format(groupName,n,score)),sep="\t")
print("Info :: Current best params ... ")
# squaredDf = pd.DataFrame(matrix,columns=df.index,index=df.index).loc[df.index,df.index]
# squaredDf.to_csv(os.path.join(pathToFolder,"SquaredSorted{}_{}.txt".format(groupName,n)),sep="\t")
# if usePeaks:
# noNoiseIndex = df["Entry"].loc[df["Cluster Labels"] > 0]
# else:
# noNoiseIndex = df.index[df["Cluster Labels"] > 0]
# squaredDf.loc[noNoiseIndex,noNoiseIndex].to_csv(os.path.join(pathToFolder,"NoNoiseSquaredSorted_{}_{}.txt".format(groupName,n)),sep="\t")
maxScore = score
bestDf = df
self._plotComplexProfiles(bestDf, pathToFolder, str(n))
if embedd is not None and plotEmbedding:
#save embedding
umapColumnNames = ["UMAP_{}".format(n) for n in range(embedd.shape[1])]
dfEmbed = pd.DataFrame(embedd, columns = umapColumnNames)
embedd = dfEmbed[umapColumnNames]
dfEmbed["clusterLabels"] = clusterLabels
if usePeaks:
dfEmbed["Ep"] = intLabels
dfEmbed["Entry"] = [x.split("_")[0] for x in intLabels]
else:
dfEmbed["Entry"] = intLabels
dfEmbed = dfEmbed.set_index("Entry")
dfEmbed.loc[dfEmbed.index,"ComplexID"] = df["ComplexID"].loc[dfEmbed.index]
rawDataMerge = [self.Xs[analysisName] for analysisName in groupFiles]
if n == 0:
for sampleN,fileName in enumerate(groupFiles):
rawDataMerge[sampleN].columns = ["{}_({}):F{}".format(colName,fileName,sampleN) for colName in rawDataMerge[sampleN].columns]
dfEmbed = dfEmbed.join(other = rawDataMerge)
try:
dfEmbed.to_csv(os.path.join(pathToFolder,"UMAP_Embeding_{}_{}.txt".format(n,groupName)),sep="\t")
except:
print("Saving umap embedding failed.")
#plot embedding.
fig, ax = plt.subplots()
ax.scatter(embedd["UMAP_0"].values, embedd["UMAP_1"].values,s=50, c=clusterLabels, cmap='Spectral')
plt.savefig(os.path.join(pathToFolder,"UMAP_Embedding_{}_n{}.pdf".format(groupName,n)))
plt.close()
recordScore[n] = {"score":score,"params":params}
def _loadAndFilterDistanceMatrix(self):
"""
Loads pairwise distances in chunks and keeps only pairs for which at least
one distance metric falls below the quantile cutoff.
Output to disk: 'highQualityInteractions(..).txt'
Parameters
----------
Returns
-------
df : pd.DataFrame
Filtered low-distance protein-protein pairs.
"""
metricColumns = [x if not isinstance(x,dict) else x["name"] for x in self.params["metrices"]]
dfColumns = ["E1","E2","E1E2","apexPeakDist"] + metricColumns
q = None
df = pd.DataFrame(columns = dfColumns)
filteredExisting = False
pathToFile = os.path.join(self.params["pathToComb"],"highQualityInteractions({}).txt".format(self.currentAnalysisName))
for X,nChunks in self._loadPairsForPrediction():
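# pairs are streamed in chunks; once more than 50000 pairs are collected a
# per-metric quantile cutoff q is derived and used to filter the already
# collected pairs and every subsequent chunk (at least one metric below q)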
boolSelfIntIdx = X[:,0] == X[:,1]
X = X[boolSelfIntIdx == False]
if q is None:
df = df.append(pd.DataFrame(X, columns = dfColumns), ignore_index=True)
else:
if not filteredExisting:
#first reduce existing df
mask = df[metricColumns] < q#X[:,[n+4 for n in range(len(self.params["metrices"]))]] < q
df = df.loc[np.any(mask,axis=1)] #filtered
filteredExisting = True
toAttach = pd.DataFrame(X, columns = dfColumns)
mask = toAttach[metricColumns] < q
toAttach = toAttach.loc[np.any(mask,axis=1)]
df = df.append(toAttach, ignore_index=True)
if df.index.size > 50000 and q is None:
q = np.quantile(df[metricColumns].astype(float).values, q = 1-self.params["metricQuantileCutoff"], axis = 0)
print("Info :: {} total pairwise protein-protein pairs at any distance below 10% quantile.".format(df.index.size))
df = self._attachPeakIDtoEntries(df)
df.to_csv(pathToFile, sep="\t")
print("Info :: Saving low distance interactions in result folder.")
return df
def _plotComplexProfiles(self,complexDf,outputFolder,name):
"""
Creates line charts as pdf for each profile.
Each chart has two axes: the top one shows the raw intensity values and the
bottom one shows the same profiles normalized to their maximum value.
Enabled/Disabled by the parameter "plotComplexProfiles".
Parameters
----------
complexDf : pd.DataFrame
Data frame containing the cluster labels of complex entries.
outputFolder : string
Path to folder, will be created if it does not exist.
name : string
Name of complex.
Returns
-------
None
"""
if self.params["plotComplexProfiles"]:
toProfiles = self._makeFolder(outputFolder,"complexProfiles")
pathToFolder = self._makeFolder(toProfiles,str(name))
x = np.arange(0,len(self.X.columns))
for c in complexDf["Cluster Labels"].unique():
if c != -1:
fig, ax = plt.subplots(nrows=2,ncols=1)
entries = complexDf.loc[complexDf["Cluster Labels"] == c,:].index
lineColors = sns.color_palette("Blues",desat=0.8,n_colors=entries.size)
for n,e in enumerate(entries):
uniprotID = e.split("_")[0]
if uniprotID in self.Signals[self.currentAnalysisName]:
y = self.Signals[self.currentAnalysisName][uniprotID].Y
normY = y / np.nanmax(y)
ax[0].plot(x,y,linestyle="-",linewidth=1, label=e, color = lineColors[n])
ax[1].plot(x,normY,linestyle="-",linewidth=1, label=e, color = lineColors[n])
plt.legend(prop={'size': 5})
plt.savefig(os.path.join(pathToFolder,"{}_n{}.pdf".format(c,len(entries))))
plt.close()
def _attachPeakIDtoEntries(self,predInts):
""
if not "apexPeakDist" in predInts.columns:
return predInts
peakIds = [peakID.split("_") for peakID in predInts["apexPeakDist"]]
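# attach the apex peak index to each entry id, e.g. a (hypothetical) entry
# 'Q9XYZ1' with peak '0' becomes 'Q9XYZ1_0' (format: <entry>_<peakIndex>)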
predInts["E1p"], predInts["E2p"] = zip(*[("{}_{}".format(E1,peakIds[n][0]),"{}_{}".format(E2,peakIds[n][1])) for n,(E1,E2) in enumerate(zip(predInts["E1"],predInts["E2"]))])
return predInts
def _makeFolder(self,*args):
""
pathToFolder = os.path.join(*args)
if not os.path.exists(pathToFolder):
os.mkdir(pathToFolder)
return pathToFolder
def _createTxtFile(self,pathToFile,headers):
""
with open(pathToFile,"w+") as f:
f.write("\t".join(headers))
def _makeTmpFolder(self, n = 0):
"""
Creates the temporary folder.
Parameters
----------
n : int
Returns
-------
pathToTmp : str
absolute path to tmp/analysis name folder.
"""
if self.params["analysisName"] is None:
analysisName = self._randomStr(50)
elif isinstance(self.params["analysisName"],list) and n < len(self.params["analysisName"]):
analysisName = self.params["analysisName"][n]
else:
analysisName = str(self.params["analysisName"])
#check if results folder exists.
pathToTmp = os.path.join(".","results")
if not os.path.exists(pathToTmp):
os.mkdir(pathToTmp)
self.currentAnalysisName = analysisName
date = datetime.today().strftime('%Y-%m-%d')
self.params["Date of anaylsis"] = date
runName = self.params["runName"] if self.params["runName"] is not None else self._randomStr(3)
self.params["pathToComb"] = self._makeFolder(pathToTmp,"{}_n({})runs".format(runName,len(self.params["analysisName"])))
print("Info :: Folder created in which combined results will be saved: " + self.params["pathToComb"])
pathToTmpFolder = os.path.join(self.params["pathToComb"],analysisName)
if os.path.exists(pathToTmpFolder):
print("Info :: Path to results folder exsists")
if self.params["restartAnalysis"]:
print("Warning :: Argument restartAnalysis was set to True .. cleaning folder.")
#to do - shift to extra fn
for root, dirs, files in os.walk(pathToTmpFolder):
for f in files:
os.unlink(os.path.join(root, f))
for d in dirs:
shutil.rmtree(os.path.join(root, d))
else:
print("Info :: Will take files from there, if they exist")
return pathToTmpFolder
try:
self._makeFolder(pathToTmpFolder)
print("Info :: Result folder created -- ",analysisName)
self._makeFolder(pathToTmpFolder,"chunks")
print("Info :: Chunks folder created/checked")
self._makeFolder(pathToTmpFolder,"result")
print("Info :: Result folder created/checked")
self._makeFolder(pathToTmpFolder,"result","alignments")
print("Info :: Alignment folder created/checked")
self._makeFolder(pathToTmpFolder,"result","modelPlots")
print("Info :: Result/modelPlots folder created/checked. In this folder, all model plots will be saved here, if savePlots equals true, otherwise empty.")
# self._createTxtFile(pathToFile = os.path.join(pathToTmpFolder,"runTimes.txt"),headers = ["Date","Time","Step","Comment"])
return pathToTmpFolder
except OSError as e:
print(e)
raise OSError("Could not create result folder due to OS Error")
def _handleComptabFormat(self,X,filesToLoad):
"""
Extracts different samples from comptab format.
Parameters
----------
X : str
Path to folder where comptab files is located
filesToLoad:
list of txt/tsv files present in the folder
Returns
-------
detectedDataFrames : list of pd.DataFrame
list of identified data frames from the comptab file
fileNames : list of str
Internal names <comptabfileName>:<sampleName>
"""
detectedDataFrames = []
fileNames = []
for fileName in filesToLoad:
comptFile = pd.read_csv(os.path.join(X,fileName), sep="\t", header=[0,1], index_col=0)
columnsToKeep = [colNameTuple for colNameTuple in comptFile.columns if "unique peptides" not in colNameTuple and "coverage" not in colNameTuple and "protein length" not in colNameTuple]
comptFile = comptFile[columnsToKeep]
#find unique sample names given in the first header
samples = np.unique([colNameTuple[0] for colNameTuple in comptFile.columns])
for sampleName in samples:
sampleColumns = [colNameTuple for colNameTuple in comptFile.columns if colNameTuple[0] == sampleName]
dataFrame = pd.DataFrame(comptFile[sampleColumns].values,
columns = [colNameTuple[1] for colNameTuple in comptFile.columns if colNameTuple[0] == sampleName])
dataFrame["Uniprot ID"] = comptFile.index
detectedDataFrames.append(dataFrame)
fileNames.append("{}:{}".format(fileName,sampleName))
return detectedDataFrames, fileNames
def _mergeDistancesForGroups(self):
""
def run(self,X, maxValueToOne = False):
"""
Runs the ComplexFinder Script.
Parameters
----------
X : str, list, pd.DataFrame
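If a string, it must be a path to a folder containing tab separated
.txt/.tsv intensity tables; a list must contain one pd.DataFrame per run;
a single pd.DataFrame is treated as one run.
maxValueToOne : bool
If True, a per-entry normalization dictionary (entry -> max intensity) is
derived; currently only handled for the folder input.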
Returns
-------
pathToTmp : str
absolute path to tmp/analysis name folder.
"""
self.allSamplesFound = False
self.entriesInChunkLoaded = False
global entriesInChunks
if isinstance(X,list) and all(isinstance(x,pd.DataFrame) for x in X):
if self.params["compTabFormat"]:
raise TypeError("If 'compTabFormat' is True. X must be a path to a folder. Either set compTabFormat to False or provide a path.")
print("Multiple dataset detected - each one will be analysed separetely")
if self.params["analysisName"] is None or not isinstance(self.params["analysisName"],list) or len(self.params["analysisName"]) != len(X):
self.params["analysisName"] = [self._randomStr(10) for n in range(len(X))] #create random analysisNames
print("Info :: 'anylsisName' did not match X shape. Created random strings per dataframe.")
elif isinstance(X,str):
if os.path.exists(X):
loadFiles = [f for f in os.listdir(X) if f.endswith(".txt") or f.endswith(".tsv")]
if self.params["compTabFormat"]:
Xs, loadFiles = self._handleComptabFormat(X,loadFiles)
else:
Xs = [pd.read_csv(os.path.join(X,fileName), sep="\t") for fileName in loadFiles]
#filterId = pd.read_csv(os.path.join("filter","SPY.txt"),index_col=None)
#Xs = [X.loc[X["Protein.Group"].isin(filterId["MouseMito"].values)] for X in Xs]
self.params["analysisName"] = loadFiles
if maxValueToOne:
maxValues = pd.concat([x.max(axis=1) for x in Xs], axis=1).max(axis=1)
normValueDict = dict([(Xs[0][self.params["idColumn"]].values[n],maxValue) for n,maxValue in enumerate(maxValues.values)])
self.params["normValueDict"] = normValueDict
else:
raise ValueError("Provided path {} does not exist.".format(X))
elif isinstance(X,pd.DataFrame):
Xs = [X]
self.params["analysisName"] = [self._randomStr(10)]
else:
ValueError("X must be either a string, a list of pandas data frames or pandas data frame itself.")
self.params["pathToTmp"] = {}
statColumns = ["nInteractions ({})".format(self.params["interactionProbabCutoff"]),"nPositiveInteractions","OOB_Score","ROC_Curve_AUC","Metrices","Classifier","ClassifierParams"]
self.stats = pd.DataFrame(index = self.params["analysisName"],columns = statColumns)
self.params["rawData"] = {}
self.params["runTimes"] = {}
self.params["runTimes"]["StartTime"] = time.time()
for n,X in enumerate(Xs):
pathToTmpFolder = self._makeTmpFolder(n)
self.params["pathToTmp"][self.currentAnalysisName] = pathToTmpFolder
if n == 0:
pathToParams = os.path.join(self.params["pathToComb"],"params.json")
pd.DataFrame().from_dict(self.params,orient="index").sort_index().to_json(pathToParams,indent = 4, orient="columns")
print("Info :: Parameters saved to output folder.")
if os.path.exists(os.path.join(self.params["pathToComb"],"runTimes.txt")):
if not self.params["restartAnalysis"] and not self.params["recalculateDistance"] and not self.params["retrainClassifier"]:
print("Warning :: Analysis done. Aborting (detected by finding the file 'runTimes.txt'")
return
print("------------------------")
print("--"+self.currentAnalysisName+"--")
print("--------Started---------")
print("--Signal Processing &--")
print("------Peak Fitting------")
print("------------------------")
if pathToTmpFolder is not None:
#loading data
self._load(X)
#self._checkGroups()
self._findPeaks(self.params["n_jobs"])
self._collectRSquaredAndFitDetails()
self._saveSignals()
combinedPeakModel = self._combinePeakResults()
self._attachQuantificationDetails(combinedPeakModel)
endSignalTime = time.time()
self.params["runTimes"]["SignalFitting&Comparision"] = time.time() - self.params["runTimes"]["StartTime"]
if not self.params["justFitAndMatchPeaks"]:
print("Info :: Peak modeling done. Starting with distance calculations and predictions (if enabled)..")
self._createSignalChunks()
for n,X in enumerate(Xs):
if n < len(self.params["analysisName"]): #happens if files other than txt are present
self.currentAnalysisName = self.params["analysisName"][n]
print(self.currentAnalysisName," :: Starting distance calculations.")
self._calculateDistance()
self._mergeDistancesForGroups()
self.params["runTimes"]["Distance Calculation"] = time.time() - endSignalTime
distEndTime = time.time()
self._loadReferenceDB()
for analysisName in self.params["analysisName"]:
self._addMetricesToDB(analysisName)
dataPrepEndTime = time.time()
self.params["runTimes"]["Database Preparation"] = dataPrepEndTime - distEndTime
self._trainPredictor(self.params["addImpurity"])
for analysisName in self.params["analysisName"]:
self.currentAnalysisName = analysisName
self._predictInteractions()
#
#save statistics
self.stats.to_csv(os.path.join(self.params["pathToComb"],"statistics.txt"),sep="\t")
#combine interactions
if not self.params["noDistanceCalculationAndPrediction"]:
if not self.params["noDatabaseForPredictions"]:
combinedInteractions = self._combineInteractionsAndClusters()
else:
print("Warning/Info :: noDistancenCalculationAndPrediction is True, skipping combineInteraction step.")
endTrainingTime = time.time()
self.params["runTimes"]["Classifier Training & Prediction"] = endTrainingTime - dataPrepEndTime
if not self.params["noDistanceCalculationAndPrediction"] and len(self.params["grouping"]) > 0 and not self.params["noDatabaseForPredictions"]:
for groupName,groupFileNames in self.params["grouping"].items():
if isinstance(groupFileNames,str):
groupFileNames = [groupFileNames]
self._clusterInteractions(combinedInteractions,groupFiles = groupFileNames,groupName = groupName)
else:
print("Info :: Cluster Interactions")
self._clusterInteractions(None)
self.params["runTimes"]["Interaction Clustering and Embedding"] = time.time() - endTrainingTime
print("Info :: Run Times :: ")
print(self.params["runTimes"])
pd.DataFrame().from_dict(self.params["runTimes"],orient="index").to_csv(os.path.join(self.params["pathToComb"],"runTimes.txt"),sep="\t")
print("Info :: Analysis done.")
def _combinePredictedInteractions(self, pathToComb):
"""
Combines predicted Interactions based on the output
files : predictedInteractions[..].txt of each run.
Parameters
----------
pathToComb : str, path to combined result folder.
Returns
-------
combResults : pd.DataFrame
combined data frame for each run. All metrices and predictions are provided.
"""
pathToInteractions = os.path.join(pathToComb,"combinedInteractions.txt")
if False and os.path.exists(pathToInteractions) and not self.params["retrainClassifier"]:
combResults = pd.read_csv(pathToInteractions,sep="\t")
combResults = self._filterCombinedInteractions(combResults)
print("Info :: Combined interactions found and loaded.")
return combResults
print("Info :: Combining interactions of runs.")
predictedInteractions = []
for analysisName in self.params["analysisName"]:
pathToResults = os.path.join(self.params["pathToTmp"][analysisName],"result")
pathToPrediction = os.path.join(pathToResults,"predictedInteractions{}_{}_{}.txt".format(self.params["metricesForPrediction"],self.params["classifierClass"],self.params["addImpurity"]))
if os.path.exists(pathToPrediction):
df = pd.read_csv(pathToPrediction,sep="\t", low_memory=False).set_index(["E1E2","E1","E2"])
df = df.loc[df["Prob_0"] > self.params["interactionProbabCutoff"]]
predictedInteractions.append(df)
else:
raise ValueError("Warning :: PredictedInteractions not found. " + str(pathToPrediction))
for n,df in enumerate(predictedInteractions):
analysisName = self.params["analysisName"][n]
if n == 0:
combResults = df
combResults.columns = ["{}_({})".format(colName,analysisName) for colName in df.columns]
combResults[analysisName] = pd.Series(["+"]*df.index.size, index = df.index)
else:
df.columns = ["{}_({})".format(colName,analysisName) for colName in df.columns]
#columnNames = [colName for colName in df.columns if colName] # we have them already from n = 0
df[analysisName] = pd.Series(["+"]*df.index.size, index = df.index)
# combResults["validSignal({})".format(analysisName)] = df[["E1_({})".format(analysisName),"E2_({})".format(analysisName)]].apply(lambda x: all(e in self.Signals[analysisName] and self.Signals[analysisName][e].valid for e in x.values),axis=1)
combResults = combResults.join(df, how="outer")
combResults = combResults.reset_index()
for analysisName in self.params["analysisName"]:
combResults["validSignalFit({})".format(analysisName)] = combResults[["E1","E2"]].apply(lambda x: all(e in self.Signals[analysisName] and self.Signals[analysisName][e].valid for e in x.values),axis=1)
combResults["#Valid Signal Fit"] = combResults[["validSignalFit({})".format(analysisName) for analysisName in self.params["analysisName"]]].sum(axis=1)
detectedColumn = [analysisName for analysisName in self.params["analysisName"]]
#detected in grouping
for groupName,groupItems in self.params["grouping"].items():
if all(groupItem in combResults.columns for groupItem in groupItems):
boolIdx = combResults[groupItems] == "+"
if isinstance(boolIdx,pd.Series):
#grouping contains a single item; the check is trivially true but reported for consistency
combResults["Complete in {}".format(groupName)] = boolIdx
else:
combResults["Complete in {}".format(groupName)] = np.sum(boolIdx,axis=1) == len(groupItems)
boolIdx = combResults[detectedColumn] == "+"
combResults["# Detected in"] = np.sum(boolIdx,axis=1)
combResults.sort_values(by="# Detected in", ascending = False, inplace = True)
# combResults.loc[combResults["E1E2"].str.contains("A0A087WU95")].to_csv("BiasedSelection.txt",sep="\t")
combResults.to_csv(pathToInteractions,sep="\t",index=True)
combResults = self._filterCombinedInteractions(combResults)
return combResults
def _filterCombinedInteractions(self,combResults):
"""
Filters combined interactions.
Parameters
----------
combResults : pd.DataFrame. Combined interactions.
Returns
-------
combResults : pd.DataFrame
filteredCombResults
"""
interactionsInAllSamples = self.params["considerOnlyInteractionsPresentInAllRuns"]
if isinstance(interactionsInAllSamples,bool) and interactionsInAllSamples:
filteredCombResults = combResults.loc[combResults["# Detected in"] == len(self.params["analysisName"])]
elif isinstance(interactionsInAllSamples,int):
if interactionsInAllSamples > len(self.params["analysisName"]):
interactionsInAllSamples = len(self.params["analysisName"])
filteredCombResults = combResults.loc[combResults["# Detected in"] >= interactionsInAllSamples]
else:
#if no filtering is applied.
filteredCombResults = combResults
return filteredCombResults
def _combineInteractionsAndClusters(self):
""
pathToComb = self.params["pathToComb"]
combinedInteractions = self._combinePredictedInteractions(pathToComb)
return combinedInteractions
def _saveSignalFitStatistics(self):
"Save Fit Statistic to disk"
pathToTxt = os.path.join(self.params["pathToTmp"][self.currentAnalysisName],"result","fitStatistic({}).txt".format(self.currentAnalysisName))
data = [{
"id":signal.ID,
"R2":signal.Rsquared,
"#Peaks":len(signal.modelledPeaks) if hasattr(signal,"modelledPeaks") else 0,
"valid":signal.valid,
"validModel":signal.validModel,
"validData":signal.validData
} for signal in self.Signals[self.currentAnalysisName].values()]
pd.DataFrame().from_dict(data).to_csv(pathToTxt,sep="\t")
def _checkAlignment(self,data):
""
data = data.dropna()
centerColumns = [colName for colName in data.columns if colName.startswith("auc")]
data[centerColumns].corr()
f = plt.figure()
ax = f.add_subplot(111)
ax.scatter(data[centerColumns[0]],data[centerColumns[1]])
plt.show()
def _alignProfiles(self,fittedPeaksData):
""
alignMethod = self.params["alignMethod"]
if len(fittedPeaksData) > 1 and alignMethod in alignModels and os.path.exists(self.params["pathToComb"]):
alignResults = OrderedDict([(analysisName,[]) for analysisName in self.params["analysisName"]])
fittedModels = dict()
removedDuplicates = [X.loc[~X.duplicated(subset=["Key"],keep=False)] for X in fittedPeaksData]
preparedData = []
for n,dataFrame in enumerate(removedDuplicates):
dataFrame.columns = ["{}_{}".format(colName,self.params["analysisName"][n]) if colName != "Key" else colName for colName in dataFrame.columns ]
dataFrame = dataFrame.set_index("Key")
preparedData.append(dataFrame)
#join data frames
joinedDataFrame = preparedData[0].join(preparedData[1:],how="outer")
if joinedDataFrame.index.size < 30:
print("Less than 30 data profiles with single peaks found. Aborting alignment")
return fittedPeaksData
#use linear regression or lowess
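# one alignment model is fitted per pair of runs on profiles whose apex
# centers differ by less than 5 fractions; the R2 of the fit is stored per
# run and the fitted model is kept in fittedModels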
for comb in combinations(self.params["analysisName"],2):
c1, c2 = comb
columnHeaders = ["Center_{}".format(c1),"Center_{}".format(c2)]
data = joinedDataFrame.dropna(subset=columnHeaders)[columnHeaders]
absDiff = np.abs(data[columnHeaders[0]] - data[columnHeaders[1]])
pd.DataFrame(data).to_csv("alignedPeaks.txt",sep="\t")
boolIdx = absDiff > 5 #remove everything with a difference higher than 5.
data = data.loc[~boolIdx]
nRows = data.index.size
X, Y = data[[columnHeaders[0]]].values, data[[columnHeaders[1]]].values
model = alignModels["LinearRegression"](**alignModelsParams["LinearRegression"]).fit(X,Y)
lnSpace = np.linspace(np.min(data.values),np.max(data.values)).reshape(-1,1) #get min / max values
Yplot = model.predict(lnSpace)
#store R2
R2 = model.score(X,Y)
alignResults[c1].append(R2)
alignResults[c2].append(R2)
#save model
fittedModels[comb] = {"model":model,"R2":R2}
#plot alignment
f = plt.figure()
ax = f.add_subplot(111)
ax.scatter(joinedDataFrame["Center_{}".format(c1)],joinedDataFrame["Center_{}".format(c2)])
ax.plot(lnSpace,Yplot)
plt.savefig(os.path.join(self.params["pathToComb"],"{}.pdf".format(comb)))
#ax.plot()
#save alignment
o = | pd.DataFrame(lnSpace) | pandas.DataFrame |
import sys
assert sys.version_info >= (3, 5) # make sure we have Python 3.5+
import pandas as pd
import numpy as np
from pathlib import Path
# init input df - fishing gear
def init_fishing_df(path):
fishing_df = pd.read_csv('../data/' + path)
# comment out for real life data--------------
fishing_df = fishing_df[fishing_df['is_fishing'] > -0.5]
fishing_df['is_fishing'] = [0 if x < 0.3 else 1 for x in fishing_df['is_fishing']]
fishing_df = fishing_df[['is_fishing', 'lat', 'lon', 'course', 'speed', 'timestamp', 'distance_from_shore', 'distance_from_port', 'mmsi', 'source']]
fishing_df['gear_type'] = Path(path).stem
#---------------------------------------------
return fishing_df
# ------------------------This section only needed when adding sst/precip data-----------------------------
# init input df - sea surface temparature
def init_sst_df(path_sst):
sst_df = pd.read_csv('../data/' + path_sst, index_col=0)
sst_df["time_bnds"] = pd.to_datetime(sst_df["time_bnds"]).dt.to_period('M')
return sst_df
# init input df - precipitation
def init_precip_df(path_precip):
precip_df = pd.read_csv('../data/' + path_precip, index_col=0)
precip_df["time"] = pd.to_datetime(precip_df["time"]).dt.to_period('M')
return precip_df
# ------------------------This section only needed when slicing lon/lat or time-----------------------------
# custom rounding functions
def custom_season(x):
return np.round(int(x)/3)
def custom_round(x):
return 0.5 + np.floor(float(x))
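# custom_season: collapse month numbers into 3-month bins (seasons);
# custom_round: snap lon/lat to the x.5 centre of its 1-degree grid cell,
# presumably to align vessel positions with the monthly gridded sst/precip data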
# ------------------------Functions to combine/add features and feature engineering-----------------------------
def time_feature(df):
df["adjust_time_date"] = | pd.to_datetime(df['timestamp'], unit='s') | pandas.to_datetime |
'''
'''
import os, glob
try:
from icecube import dataclasses, icetray, dataio
from icecube import genie_icetray
except ModuleNotFoundError:
# Not running in IceTray
pass
import numpy as np
import pandas as pd
from sqlalchemy import create_engine
import sqlalchemy
import time
from multiprocessing import Pool
import pickle
import argparse
import sys
parser = argparse.ArgumentParser()
parser.add_argument("-config", "--config", type=str, required=True)
def contains_retro(frame):
try:
frame['L7_reconstructed_zenith']
return True
except:
return False
def build_standard_extraction():
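# maps output column names to python expressions; each expression is
# eval'ed per frame in extract_truth below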
standard_truths = {'energy': 'MCInIcePrimary.energy',
'position_x': 'MCInIcePrimary.pos.x',
'position_y': 'MCInIcePrimary.pos.y',
'position_z': 'MCInIcePrimary.pos.z',
'azimuth': 'MCInIcePrimary.dir.azimuth',
'zenith': 'MCInIcePrimary.dir.zenith',
'pid': 'MCInIcePrimary.pdg_encoding',
'event_time': 'event_time',
'sim_type': 'sim_type',
'interaction_type': 'interaction_type',
'elasticity': 'elasticity',
'RunID': 'RunID',
'SubrunID': 'SubrunID',
'EventID': 'EventID',
'SubEventID': 'SubEventID'}
return standard_truths
def case_handle_this(frame, sim_type):
if sim_type != 'noise':
MCInIcePrimary = frame['MCInIcePrimary']
else:
MCInIcePrimary = None
if sim_type != 'muongun' and sim_type != 'noise':
interaction_type = frame["I3MCWeightDict"]["InteractionType"]
elasticity = frame['I3GENIEResultDict']['y']
else:
interaction_type = -1
elasticity = -1
return MCInIcePrimary, interaction_type, elasticity
def is_montecarlo(frame):
mc = True
try:
frame['MCInIcePrimary']
except:
mc = False
return mc
def build_blank_extraction():
## Please note that if the simulation type is pure noise or real data, these values will be appended to the truth table
blank_extraction = {'energy_log10': '-1',
'position_x': '-1',
'position_y': '-1',
'position_z': '-1',
'azimuth': '-1',
'zenith': '-1',
'pid': '-1',
'event_time': 'event_time',
'sim_type': 'sim_type',
'interaction_type': '-1',
'elasticity': '-1',
'RunID': 'RunID',
'SubrunID': 'SubrunID',
'EventID': 'EventID',
'SubEventID': 'SubEventID'}
return blank_extraction
def build_retro_extraction(is_mc):
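# retro reconstruction and classifier outputs; values are expressions that
# are eval'ed on the current frame in extract_retro (same pattern as the
# truth extraction above)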
retro_extraction = {'azimuth_retro': 'frame["L7_reconstructed_azimuth"].value',
'time_retro': 'frame["L7_reconstructed_time"].value',
'energy_retro': 'frame["L7_reconstructed_total_energy"].value',
'position_x_retro': 'frame["L7_reconstructed_vertex_x"].value',
'position_y_retro': 'frame["L7_reconstructed_vertex_y"].value',
'position_z_retro': 'frame["L7_reconstructed_vertex_z"].value',
'zenith_retro': 'frame["L7_reconstructed_zenith"].value',
'azimuth_sigma': 'frame["L7_retro_crs_prefit__azimuth_sigma_tot"].value',
'position_x_sigma': 'frame["L7_retro_crs_prefit__x_sigma_tot"].value',
'position_y_sigma': 'frame["L7_retro_crs_prefit__y_sigma_tot"].value',
'position_z_sigma': 'frame["L7_retro_crs_prefit__z_sigma_tot"].value',
'time_sigma': 'frame["L7_retro_crs_prefit__time_sigma_tot"].value',
'zenith_sigma': 'frame["L7_retro_crs_prefit__zenith_sigma_tot"].value',
'energy_sigma': 'frame["L7_retro_crs_prefit__energy_sigma_tot"].value',
'cascade_energy_retro': 'frame["L7_reconstructed_cascade_energy"].value',
'track_energy_retro': 'frame["L7_reconstructed_track_energy"].value',
'track_length_retro': 'frame["L7_reconstructed_track_length"].value',
'lvl7_probnu': 'frame["L7_MuonClassifier_FullSky_ProbNu"].value',
'lvl4_probnu': 'frame["L4_MuonClassifier_Data_ProbNu"].value',
'lvl7_prob_track': 'frame["L7_PIDClassifier_FullSky_ProbTrack"].value'}
if is_mc:
retro_extraction['osc_weight'] = 'frame["I3MCWeightDict"]["weight"]'
return retro_extraction
def extract_retro(frame):
is_mc = is_montecarlo(frame)
retro = {}
if contains_retro(frame):
retro_extraction = build_retro_extraction(is_mc)
for retro_variable in retro_extraction.keys():
retro[retro_variable] = eval(retro_extraction[retro_variable])
return retro
def extract_truth(frame, input_file, extract_these_truths = None):
if extract_these_truths is None:
extract_these_truths = build_standard_extraction()
is_mc = is_montecarlo(frame)
sim_type = find_simulation_type(is_mc,input_file)
event_time = frame['I3EventHeader'].start_time.utc_daq_time
RunID, SubrunID, EventID, SubEventID = extract_event_ids(frame)
if is_mc:
MCInIcePrimary, interaction_type, elasticity = case_handle_this(frame, sim_type)
if MCInIcePrimary is not None:
## is not noise
truth = {}
for truth_variable in extract_these_truths.keys():
truth[truth_variable] = eval(extract_these_truths[truth_variable])
else:
## is real data or noise
blank_extraction = build_blank_extraction()
truth = {}
for truth_variable in blank_extraction.keys():
truth[truth_variable] = eval(blank_extraction[truth_variable])
return truth
def extract_features(frame, key, gcd_dict,calibration):
charge = []
time = []
width = []
area = []
rqe = []
x = []
y = []
z = []
if key in frame.keys():
data = frame[key]
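# the pulse series may be stored directly, as a mask that needs the
# I3Calibration applied via .apply(frame), or only be accessible through
# I3RecoPulseSeriesMap.from_frame; try the three access patterns in order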
try:
om_keys = data.keys()
except:
try:
if "I3Calibration" in frame.keys():
data = frame[key].apply(frame)
om_keys = data.keys()
else:
frame["I3Calibration"] = calibration
data = frame[key].apply(frame)
om_keys = data.keys()
except:
data = dataclasses.I3RecoPulseSeriesMap.from_frame(frame,key)
om_keys = data.keys()
for om_key in om_keys:
pulses = data[om_key]
for pulse in pulses:
charge.append(pulse.charge)
time.append(pulse.time)
width.append(pulse.width)
area.append(gcd_dict[om_key].area)
rqe.append(frame["I3Calibration"].dom_cal[om_key].relative_dom_eff)
x.append(gcd_dict[om_key].position.x)
y.append(gcd_dict[om_key].position.y)
z.append(gcd_dict[om_key].position.z)
features = {'charge': charge,
'dom_time': time,
'dom_x': x,
'dom_y': y,
'dom_z': z,
'width' : width,
'pmt_area': area,
'rde': rqe}
return features
def find_simulation_type(mc, input_file):
if mc == False:
sim_type = 'data'
else:
sim_type = 'lol'
if 'muon' in input_file:
sim_type = 'muongun'
if 'corsika' in input_file:
sim_type = 'corsika'
if 'genie' in input_file:
sim_type = 'genie'
if 'noise' in input_file:
sim_type = 'noise'
if sim_type == 'lol':
print('SIM TYPE NOT FOUND!')
return sim_type
def load_geospatial_data(gcd_path):
gcd_file = dataio.I3File(gcd_path)
g_frame = gcd_file.pop_frame(icetray.I3Frame.Geometry)
om_geom_dict = g_frame["I3Geometry"].omgeo
calibration = gcd_file.pop_frame(icetray.I3Frame.Calibration)["I3Calibration"]
return om_geom_dict, calibration
def is_empty(features):
if features['dom_x'] is not None:
return False
else:
return True
def extract_event_ids(frame):
RunID = frame['I3EventHeader'].run_id
SubrunID = frame['I3EventHeader'].sub_run_id
EventID = frame['I3EventHeader'].event_id
SubEventID = frame['I3EventHeader'].sub_event_id
return RunID, SubrunID, EventID, SubEventID
def apply_event_no(extraction, event_no_list, event_counter):
out = pd.DataFrame(extraction.values()).T
out.columns = extraction.keys()
out['event_no'] = event_no_list[event_counter]
return out
def check_for_new_columns(columns, biggest_columns):
if len(columns) > len(biggest_columns):
return columns
else:
return biggest_columns
def write_dicts(settings):
input_files,id,gcd_files,outdir , max_dict_size,event_no_list, pulse_map_keys,custom_truth, db_name,verbose = settings
# Useful bits
event_counter = 0
feature_big = {}
truth_big = pd.DataFrame()
retro_big = | pd.DataFrame() | pandas.DataFrame |
# pylint: disable=E1101
from datetime import datetime
import datetime as dt
import os
import warnings
import nose
import struct
import sys
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from pandas.compat import iterkeys
from pandas.core.frame import DataFrame, Series
from pandas.core.common import is_categorical_dtype
from pandas.io.parsers import read_csv
from pandas.io.stata import (read_stata, StataReader, InvalidColumnName,
PossiblePrecisionLoss, StataMissingValue)
import pandas.util.testing as tm
from pandas.tslib import NaT
from pandas import compat
class TestStata(tm.TestCase):
def setUp(self):
self.dirpath = tm.get_data_path()
self.dta1_114 = os.path.join(self.dirpath, 'stata1_114.dta')
self.dta1_117 = os.path.join(self.dirpath, 'stata1_117.dta')
self.dta2_113 = os.path.join(self.dirpath, 'stata2_113.dta')
self.dta2_114 = os.path.join(self.dirpath, 'stata2_114.dta')
self.dta2_115 = os.path.join(self.dirpath, 'stata2_115.dta')
self.dta2_117 = os.path.join(self.dirpath, 'stata2_117.dta')
self.dta3_113 = os.path.join(self.dirpath, 'stata3_113.dta')
self.dta3_114 = os.path.join(self.dirpath, 'stata3_114.dta')
self.dta3_115 = os.path.join(self.dirpath, 'stata3_115.dta')
self.dta3_117 = os.path.join(self.dirpath, 'stata3_117.dta')
self.csv3 = os.path.join(self.dirpath, 'stata3.csv')
self.dta4_113 = os.path.join(self.dirpath, 'stata4_113.dta')
self.dta4_114 = os.path.join(self.dirpath, 'stata4_114.dta')
self.dta4_115 = os.path.join(self.dirpath, 'stata4_115.dta')
self.dta4_117 = os.path.join(self.dirpath, 'stata4_117.dta')
self.dta_encoding = os.path.join(self.dirpath, 'stata1_encoding.dta')
self.csv14 = os.path.join(self.dirpath, 'stata5.csv')
self.dta14_113 = os.path.join(self.dirpath, 'stata5_113.dta')
self.dta14_114 = os.path.join(self.dirpath, 'stata5_114.dta')
self.dta14_115 = os.path.join(self.dirpath, 'stata5_115.dta')
self.dta14_117 = os.path.join(self.dirpath, 'stata5_117.dta')
self.csv15 = os.path.join(self.dirpath, 'stata6.csv')
self.dta15_113 = os.path.join(self.dirpath, 'stata6_113.dta')
self.dta15_114 = os.path.join(self.dirpath, 'stata6_114.dta')
self.dta15_115 = os.path.join(self.dirpath, 'stata6_115.dta')
self.dta15_117 = os.path.join(self.dirpath, 'stata6_117.dta')
self.dta16_115 = os.path.join(self.dirpath, 'stata7_115.dta')
self.dta16_117 = os.path.join(self.dirpath, 'stata7_117.dta')
self.dta17_113 = os.path.join(self.dirpath, 'stata8_113.dta')
self.dta17_115 = os.path.join(self.dirpath, 'stata8_115.dta')
self.dta17_117 = os.path.join(self.dirpath, 'stata8_117.dta')
self.dta18_115 = os.path.join(self.dirpath, 'stata9_115.dta')
self.dta18_117 = os.path.join(self.dirpath, 'stata9_117.dta')
self.dta19_115 = os.path.join(self.dirpath, 'stata10_115.dta')
self.dta19_117 = os.path.join(self.dirpath, 'stata10_117.dta')
self.dta20_115 = os.path.join(self.dirpath, 'stata11_115.dta')
self.dta20_117 = os.path.join(self.dirpath, 'stata11_117.dta')
self.dta21_117 = os.path.join(self.dirpath, 'stata12_117.dta')
def read_dta(self, file):
# Legacy default reader configuration
return read_stata(file, convert_dates=True)
def read_csv(self, file):
return read_csv(file, parse_dates=True)
def test_read_empty_dta(self):
empty_ds = DataFrame(columns=['unit'])
# GH 7369, make sure can read a 0-obs dta file
with tm.ensure_clean() as path:
empty_ds.to_stata(path,write_index=False)
empty_ds2 = read_stata(path)
tm.assert_frame_equal(empty_ds, empty_ds2)
def test_data_method(self):
# Minimal testing of legacy data method
reader_114 = StataReader(self.dta1_114)
with warnings.catch_warnings(record=True) as w:
parsed_114_data = reader_114.data()
reader_114 = StataReader(self.dta1_114)
parsed_114_read = reader_114.read()
tm.assert_frame_equal(parsed_114_data, parsed_114_read)
def test_read_dta1(self):
reader_114 = StataReader(self.dta1_114)
parsed_114 = reader_114.read()
reader_117 = StataReader(self.dta1_117)
parsed_117 = reader_117.read()
# Pandas uses np.nan as missing value.
# Thus, all columns will be of type float, regardless of their name.
expected = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=['float_miss', 'double_miss', 'byte_miss',
'int_miss', 'long_miss'])
# this is an oddity as really the nan should be float64, but
# the casting doesn't fail so need to match stata here
expected['float_miss'] = expected['float_miss'].astype(np.float32)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_117, expected)
def test_read_dta2(self):
if LooseVersion(sys.version) < '2.7':
raise nose.SkipTest('datetime interp under 2.6 is faulty')
expected = DataFrame.from_records(
[
(
datetime(2006, 11, 19, 23, 13, 20),
1479596223000,
datetime(2010, 1, 20),
datetime(2010, 1, 8),
datetime(2010, 1, 1),
datetime(1974, 7, 1),
datetime(2010, 1, 1),
datetime(2010, 1, 1)
),
(
datetime(1959, 12, 31, 20, 3, 20),
-1479590,
datetime(1953, 10, 2),
datetime(1948, 6, 10),
datetime(1955, 1, 1),
datetime(1955, 7, 1),
datetime(1955, 1, 1),
datetime(2, 1, 1)
),
(
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
)
],
columns=['datetime_c', 'datetime_big_c', 'date', 'weekly_date',
'monthly_date', 'quarterly_date', 'half_yearly_date',
'yearly_date']
)
expected['yearly_date'] = expected['yearly_date'].astype('O')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
parsed_114 = self.read_dta(self.dta2_114)
parsed_115 = self.read_dta(self.dta2_115)
parsed_117 = self.read_dta(self.dta2_117)
# 113 is buggy due to limits of date format support in Stata
# parsed_113 = self.read_dta(self.dta2_113)
# Remove resource warnings
w = [x for x in w if x.category is UserWarning]
# should get warning for each call to read_dta
tm.assert_equal(len(w), 3)
# buggy test because of the NaT comparison on certain platforms
# Format 113 test fails since it does not support tc and tC formats
# tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_115, expected)
tm.assert_frame_equal(parsed_117, expected)
def test_read_dta3(self):
parsed_113 = self.read_dta(self.dta3_113)
parsed_114 = self.read_dta(self.dta3_114)
parsed_115 = self.read_dta(self.dta3_115)
parsed_117 = self.read_dta(self.dta3_117)
# match stata here
expected = self.read_csv(self.csv3)
expected = expected.astype(np.float32)
expected['year'] = expected['year'].astype(np.int16)
expected['quarter'] = expected['quarter'].astype(np.int8)
tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_115, expected)
tm.assert_frame_equal(parsed_117, expected)
def test_read_dta4(self):
parsed_113 = self.read_dta(self.dta4_113)
parsed_114 = self.read_dta(self.dta4_114)
parsed_115 = self.read_dta(self.dta4_115)
parsed_117 = self.read_dta(self.dta4_117)
expected = DataFrame.from_records(
[
["one", "ten", "one", "one", "one"],
["two", "nine", "two", "two", "two"],
["three", "eight", "three", "three", "three"],
["four", "seven", 4, "four", "four"],
["five", "six", 5, np.nan, "five"],
["six", "five", 6, np.nan, "six"],
["seven", "four", 7, np.nan, "seven"],
["eight", "three", 8, np.nan, "eight"],
["nine", "two", 9, np.nan, "nine"],
["ten", "one", "ten", np.nan, "ten"]
],
columns=['fully_labeled', 'fully_labeled2', 'incompletely_labeled',
'labeled_with_missings', 'float_labelled'])
# these are all categoricals
expected = pd.concat([expected[col].astype('category') for col in expected], axis=1)
tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_115, expected)
tm.assert_frame_equal(parsed_117, expected)
# File containing strls
def test_read_dta12(self):
parsed_117 = self.read_dta(self.dta21_117)
expected = DataFrame.from_records(
[
[1, "abc", "abcdefghi"],
[3, "cba", "qwertywertyqwerty"],
[93, "", "strl"],
],
columns=['x', 'y', 'z'])
tm.assert_frame_equal(parsed_117, expected, check_dtype=False)
def test_read_write_dta5(self):
original = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=['float_miss', 'double_miss', 'byte_miss',
'int_miss', 'long_miss'])
original.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path, None)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_write_dta6(self):
original = self.read_csv(self.csv3)
original.index.name = 'index'
original.index = original.index.astype(np.int32)
original['year'] = original['year'].astype(np.int32)
original['quarter'] = original['quarter'].astype(np.int32)
with tm.ensure_clean() as path:
original.to_stata(path, None)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_read_write_dta10(self):
original = DataFrame(data=[["string", "object", 1, 1.1,
np.datetime64('2003-12-25')]],
columns=['string', 'object', 'integer', 'floating',
'datetime'])
original["object"] = Series(original["object"], dtype=object)
original.index.name = 'index'
original.index = original.index.astype(np.int32)
original['integer'] = original['integer'].astype(np.int32)
with tm.ensure_clean() as path:
original.to_stata(path, {'datetime': 'tc'})
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_stata_doc_examples(self):
with tm.ensure_clean() as path:
df = DataFrame(np.random.randn(10, 2), columns=list('AB'))
df.to_stata(path)
def test_write_preserves_original(self):
# 9795
np.random.seed(423)
df = pd.DataFrame(np.random.randn(5,4), columns=list('abcd'))
df.ix[2, 'a':'c'] = np.nan
df_copy = df.copy()
df.to_stata('test.dta', write_index=False)
tm.assert_frame_equal(df, df_copy)
def test_encoding(self):
# GH 4626, proper encoding handling
raw = read_stata(self.dta_encoding)
encoded = read_stata(self.dta_encoding, encoding="latin-1")
result = encoded.kreis1849[0]
if compat.PY3:
expected = raw.kreis1849[0]
self.assertEqual(result, expected)
self.assertIsInstance(result, compat.string_types)
else:
expected = raw.kreis1849.str.decode("latin-1")[0]
self.assertEqual(result, expected)
self.assertIsInstance(result, unicode)
with tm.ensure_clean() as path:
encoded.to_stata(path,encoding='latin-1', write_index=False)
reread_encoded = read_stata(path, encoding='latin-1')
tm.assert_frame_equal(encoded, reread_encoded)
def test_read_write_dta11(self):
original = DataFrame([(1, 2, 3, 4)],
columns=['good', compat.u('b\u00E4d'), '8number', 'astringwithmorethan32characters______'])
formatted = DataFrame([(1, 2, 3, 4)],
columns=['good', 'b_d', '_8number', 'astringwithmorethan32characters_'])
formatted.index.name = 'index'
formatted = formatted.astype(np.int32)
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
original.to_stata(path, None)
# should get a warning for that format.
tm.assert_equal(len(w), 1)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'), formatted)
def test_read_write_dta12(self):
original = DataFrame([(1, 2, 3, 4, 5, 6)],
columns=['astringwithmorethan32characters_1',
'astringwithmorethan32characters_2',
'+',
'-',
'short',
'delete'])
formatted = DataFrame([(1, 2, 3, 4, 5, 6)],
columns=['astringwithmorethan32characters_',
'_0astringwithmorethan32character',
'_',
'_1_',
'_short',
'_delete'])
formatted.index.name = 'index'
formatted = formatted.astype(np.int32)
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
original.to_stata(path, None)
tm.assert_equal(len(w), 1) # should get a warning for that format.
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'), formatted)
def test_read_write_dta13(self):
s1 = Series(2**9, dtype=np.int16)
s2 = Series(2**17, dtype=np.int32)
s3 = Series(2**33, dtype=np.int64)
original = DataFrame({'int16': s1, 'int32': s2, 'int64': s3})
original.index.name = 'index'
formatted = original
formatted['int64'] = formatted['int64'].astype(np.float64)
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
formatted)
def test_read_write_reread_dta14(self):
expected = self.read_csv(self.csv14)
cols = ['byte_', 'int_', 'long_', 'float_', 'double_']
for col in cols:
expected[col] = expected[col].convert_objects(convert_numeric=True)
expected['float_'] = expected['float_'].astype(np.float32)
expected['date_td'] = pd.to_datetime(expected['date_td'], coerce=True)
parsed_113 = self.read_dta(self.dta14_113)
parsed_113.index.name = 'index'
parsed_114 = self.read_dta(self.dta14_114)
parsed_114.index.name = 'index'
parsed_115 = self.read_dta(self.dta14_115)
parsed_115.index.name = 'index'
parsed_117 = self.read_dta(self.dta14_117)
parsed_117.index.name = 'index'
tm.assert_frame_equal(parsed_114, parsed_113)
tm.assert_frame_equal(parsed_114, parsed_115)
tm.assert_frame_equal(parsed_114, parsed_117)
with tm.ensure_clean() as path:
parsed_114.to_stata(path, {'date_td': 'td'})
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'), parsed_114)
def test_read_write_reread_dta15(self):
expected = self.read_csv(self.csv15)
expected['byte_'] = expected['byte_'].astype(np.int8)
expected['int_'] = expected['int_'].astype(np.int16)
expected['long_'] = expected['long_'].astype(np.int32)
expected['float_'] = expected['float_'].astype(np.float32)
expected['double_'] = expected['double_'].astype(np.float64)
expected['date_td'] = expected['date_td'].apply(datetime.strptime, args=('%Y-%m-%d',))
parsed_113 = self.read_dta(self.dta15_113)
parsed_114 = self.read_dta(self.dta15_114)
parsed_115 = self.read_dta(self.dta15_115)
parsed_117 = self.read_dta(self.dta15_117)
tm.assert_frame_equal(expected, parsed_114)
tm.assert_frame_equal(parsed_113, parsed_114)
tm.assert_frame_equal(parsed_114, parsed_115)
tm.assert_frame_equal(parsed_114, parsed_117)
def test_timestamp_and_label(self):
original = | DataFrame([(1,)], columns=['var']) | pandas.core.frame.DataFrame |
import datetime
import numpy as np
from pandas.compat import IS64, is_platform_windows
from pandas import Categorical, DataFrame, Series, date_range
import pandas._testing as tm
class TestIteration:
def test_keys(self, float_frame):
assert float_frame.keys() is float_frame.columns
def test_iteritems(self):
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["a", "a", "b"])
for k, v in df.items():
assert isinstance(v, DataFrame._constructor_sliced)
def test_items(self):
# GH#17213, GH#13918
cols = ["a", "b", "c"]
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=cols)
for c, (k, v) in zip(cols, df.items()):
assert c == k
assert isinstance(v, Series)
assert (df[k] == v).all()
def test_items_names(self, float_string_frame):
for k, v in float_string_frame.items():
assert v.name == k
def test_iter(self, float_frame):
assert tm.equalContents(list(float_frame), float_frame.columns)
def test_iterrows(self, float_frame, float_string_frame):
for k, v in float_frame.iterrows():
exp = float_frame.loc[k]
tm.assert_series_equal(v, exp)
for k, v in float_string_frame.iterrows():
exp = float_string_frame.loc[k]
tm.assert_series_equal(v, exp)
def test_iterrows_iso8601(self):
# GH#19671
s = DataFrame(
{
"non_iso8601": ["M1701", "M1802", "M1903", "M2004"],
"iso8601": date_range("2000-01-01", periods=4, freq="M"),
}
)
for k, v in s.iterrows():
exp = s.loc[k]
tm.assert_series_equal(v, exp)
def test_iterrows_corner(self):
# GH#12222
df = DataFrame(
{
"a": [datetime.datetime(2015, 1, 1)],
"b": [None],
"c": [None],
"d": [""],
"e": [[]],
"f": [set()],
"g": [{}],
}
)
expected = Series(
[datetime.datetime(2015, 1, 1), None, None, "", [], set(), {}],
index=list("abcdefg"),
name=0,
dtype="object",
)
_, result = next(df.iterrows())
tm.assert_series_equal(result, expected)
def test_itertuples(self, float_frame):
for i, tup in enumerate(float_frame.itertuples()):
ser = DataFrame._constructor_sliced(tup[1:])
ser.name = tup[0]
expected = float_frame.iloc[i, :].reset_index(drop=True)
| tm.assert_series_equal(ser, expected) | pandas._testing.assert_series_equal |
# -*- coding: utf-8 -*-
"""
@brief test log(time=2s)
"""
import unittest
import pandas
import numpy
from sklearn import datasets
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from pyquickhelper.pycode import ExtTestCase
from mlinsights.search_rank import SearchEnginePredictions
class TestSearchPredictions(ExtTestCase):
def test_search_predictions_lr(self):
iris = datasets.load_iris()
X = iris.data[:, :2]
y = iris.target
clf = LogisticRegression()
clf.fit(X, y)
res = []
for i in range(20):
h = i * 0.05
h2 = 1 - i * 0.05
res.append(dict(ind=i * 5, meta1="m%d" %
i, meta2="m%d" % (i + 1), f1=h, f2=h2))
df = | pandas.DataFrame(res) | pandas.DataFrame |
import numpy as np
from datetime import timedelta
from distutils.version import LooseVersion
import pandas as pd
import pandas.util.testing as tm
from pandas import to_timedelta
from pandas.util.testing import assert_series_equal, assert_frame_equal
from pandas import (Series, Timedelta, DataFrame, Timestamp, TimedeltaIndex,
timedelta_range, date_range, DatetimeIndex, Int64Index,
_np_version_under1p10, Float64Index, Index, tslib)
from pandas.tests.test_base import Ops
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timedelta('1 days')),
self.assertEqual(idx.max(), Timedelta('3 days')),
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
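# np.asarray(dr) yields datetime64[ns] values (ns since the epoch) which the
# TimedeltaIndex reinterprets as nanosecond timedeltas, so 2016-01-15
# becomes Timedelta('16815 days')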
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
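        # four equivalent spellings of a 2-hour offset; each should behave
        # identically when added to the range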
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
idx = TimedeltaIndex(['1 day', '2 day'])
msg = "cannot subtract a datelike from a TimedeltaIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx - Timestamp('2011-01-01')
result = Timestamp('2011-01-01') + idx
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
def test_ops_compat(self):
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
rng = timedelta_range('1 days', '10 days', name='foo')
# multiply
for offset in offsets:
self.assertRaises(TypeError, lambda: rng * offset)
# divide
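        # a 2-hour offset divides '1 days' twelve times, so element n is
        # 12 * (n + 1)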
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected, exact=False)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected)
        # don't allow division by NaT (maybe could in the future)
self.assertRaises(TypeError, lambda: rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
self.assertRaises(TypeError, lambda: tdi - dt)
self.assertRaises(TypeError, lambda: tdi - dti)
self.assertRaises(TypeError, lambda: td - dt)
self.assertRaises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
self.assertEqual(result, expected)
self.assertIsInstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
self.assertRaises(TypeError, lambda: dt_tz - ts)
self.assertRaises(TypeError, lambda: dt_tz - dt)
self.assertRaises(TypeError, lambda: dt_tz - ts_tz2)
self.assertRaises(TypeError, lambda: dt - dt_tz)
self.assertRaises(TypeError, lambda: ts - dt_tz)
self.assertRaises(TypeError, lambda: ts_tz2 - ts)
self.assertRaises(TypeError, lambda: ts_tz2 - dt)
self.assertRaises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
self.assertRaises(TypeError, lambda: dti - ts_tz)
self.assertRaises(TypeError, lambda: dti_tz - ts)
self.assertRaises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'H']:
idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
self.assertRaises(ValueError, lambda: tdi + dti[0:1])
self.assertRaises(ValueError, lambda: tdi[0:1] + dti)
# random indexes
self.assertRaises(TypeError, lambda: tdi + Int64Index([1, 2, 3]))
# this is a union!
# self.assertRaises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
self.assertEqual(result, expected)
result = td + dt
expected = Timestamp('20130102')
self.assertEqual(result, expected)
def test_comp_nat(self):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = timedelta_range('1 days 09:00:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1)))
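        # value_counts sorts by descending count, so the most-repeated
        # (last) value comes first in the expected index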
exp_idx = timedelta_range('1 days 18:00:00', freq='-1H', periods=10)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = timedelta_range('1 days 09:00:00', freq='H', periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00',
'1 days 09:00:00', '1 days 08:00:00',
'1 days 08:00:00', pd.NaT])
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00'])
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00',
pd.NaT])
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(TimedeltaIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
['00:01:00', '00:01:00', '00:02:00'],
['00:01:00', '00:01:00', '00:00:01'])):
tm.assertIn(idx[0], idx)
def test_unknown_attribute(self):
# GH 9680
tdi = pd.timedelta_range(start=0, periods=10, freq='1s')
ts = pd.Series(np.random.normal(size=10), index=tdi)
self.assertNotIn('foo', ts.__dict__.keys())
self.assertRaises(AttributeError, lambda: ts.foo)
def test_order(self):
# GH 10295
idx1 = TimedeltaIndex(['1 day', '2 day', '3 day'], freq='D',
name='idx')
idx2 = TimedeltaIndex(
['1 hour', '2 hour', '3 hour'], freq='H', name='idx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
idx1 = TimedeltaIndex(['1 hour', '3 hour', '5 hour',
'2 hour ', '1 hour'], name='idx1')
exp1 = TimedeltaIndex(['1 hour', '1 hour', '2 hour',
'3 hour', '5 hour'], name='idx1')
idx2 = TimedeltaIndex(['1 day', '3 day', '5 day',
'2 day', '1 day'], name='idx2')
# TODO(wesm): unused?
# exp2 = TimedeltaIndex(['1 day', '1 day', '2 day',
# '3 day', '5 day'], name='idx2')
# idx3 = TimedeltaIndex([pd.NaT, '3 minute', '5 minute',
# '2 minute', pd.NaT], name='idx3')
# exp3 = TimedeltaIndex([pd.NaT, pd.NaT, '2 minute', '3 minute',
# '5 minute'], name='idx3')
for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx[0]
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx[0:5]
expected = pd.timedelta_range('1 day', '5 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.timedelta_range('1 day', '9 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.timedelta_range('12 day', '24 day', freq='3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = TimedeltaIndex(['5 day', '4 day', '3 day',
'2 day', '1 day'],
freq='-1D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx.take([0])
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx.take([-1])
self.assertEqual(result, pd.Timedelta('31 day'))
result = idx.take([0, 1, 2])
expected = pd.timedelta_range('1 day', '3 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.timedelta_range('1 day', '5 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.timedelta_range('8 day', '2 day', freq='-3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = TimedeltaIndex(['29 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
        for freq in ['D', '3D', '-3D', 'H', '2H', '-2H',
                     'T', '2T', 'S', '-3S']:
idx = pd.timedelta_range('1', freq=freq, periods=10)
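            # rebuilding from the raw int64 (nanosecond) values with
            # freq='infer' should recover the original frequency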
result = pd.TimedeltaIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.timedelta_range('1', freq='D', periods=5, name='x')
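        # _nat_new builds an all-NaT index of the same length (or the raw
        # iNaT integers when box=False)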
result = idx._nat_new()
exp = pd.TimedeltaIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
idx = pd.TimedeltaIndex([], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx')
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
tm.assert_index_equal(idx.shift(0, freq='T'), idx)
exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'],
name='xxx')
tm.assert_index_equal(idx.shift(3, freq='T'), exp)
exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'],
name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='T'), exp)
def test_repeat(self):
index = pd.timedelta_range('1 days', periods=2, freq='D')
exp = pd.TimedeltaIndex(['1 days', '1 days', '2 days', '2 days'])
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = TimedeltaIndex(['1 days', 'NaT', '3 days'])
exp = TimedeltaIndex(['1 days', '1 days', '1 days',
'NaT', 'NaT', 'NaT',
'3 days', '3 days', '3 days'])
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_nat(self):
self.assertIs(pd.TimedeltaIndex._na_value, pd.NaT)
self.assertIs(pd.TimedeltaIndex([])._na_value, pd.NaT)
idx = pd.TimedeltaIndex(['1 days', '2 days'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.TimedeltaIndex(['1 days', 'NaT'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
idx = pd.TimedeltaIndex(['1 days', '2 days', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.TimedeltaIndex(['2 days', '1 days', 'NaT'])
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.asobject.equals(idx2.asobject))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
class TestTimedeltas(tm.TestCase):
_multiprocess_can_split_ = True
def test_ops(self):
td = Timedelta(10, unit='d')
self.assertEqual(-td, Timedelta(-10, unit='d'))
self.assertEqual(+td, Timedelta(10, unit='d'))
self.assertEqual(td - td, Timedelta(0, unit='ns'))
self.assertTrue((td - pd.NaT) is pd.NaT)
self.assertEqual(td + td, Timedelta(20, unit='d'))
self.assertTrue((td + pd.NaT) is pd.NaT)
self.assertEqual(td * 2, Timedelta(20, unit='d'))
self.assertTrue((td * pd.NaT) is pd.NaT)
self.assertEqual(td / 2, Timedelta(5, unit='d'))
self.assertEqual(abs(td), td)
self.assertEqual(abs(-td), td)
self.assertEqual(td / td, 1)
self.assertTrue((td / pd.NaT) is np.nan)
# invert
self.assertEqual(-td, Timedelta('-10d'))
self.assertEqual(td * -1, Timedelta('-10d'))
self.assertEqual(-1 * td, Timedelta('-10d'))
self.assertEqual(abs(-td), Timedelta('10d'))
# invalid
self.assertRaises(TypeError, lambda: Timedelta(11, unit='d') // 2)
# invalid multiply with another timedelta
self.assertRaises(TypeError, lambda: td * td)
# can't operate with integers
self.assertRaises(TypeError, lambda: td + 2)
self.assertRaises(TypeError, lambda: td - 2)
def test_ops_offsets(self):
td = Timedelta(10, unit='d')
self.assertEqual(Timedelta(241, unit='h'), td + pd.offsets.Hour(1))
self.assertEqual(Timedelta(241, unit='h'), pd.offsets.Hour(1) + td)
self.assertEqual(240, td / pd.offsets.Hour(1))
self.assertEqual(1 / 240.0, pd.offsets.Hour(1) / td)
self.assertEqual(Timedelta(239, unit='h'), td - pd.offsets.Hour(1))
self.assertEqual(Timedelta(-239, unit='h'), pd.offsets.Hour(1) - td)
def test_ops_ndarray(self):
td = Timedelta('1 day')
# timedelta, timedelta
other = pd.to_timedelta(['1 day']).values
expected = pd.to_timedelta(['2 days']).values
self.assert_numpy_array_equal(td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other + td, expected)
self.assertRaises(TypeError, lambda: td + np.array([1]))
self.assertRaises(TypeError, lambda: np.array([1]) + td)
expected = pd.to_timedelta(['0 days']).values
self.assert_numpy_array_equal(td - other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(-other + td, expected)
self.assertRaises(TypeError, lambda: td - np.array([1]))
self.assertRaises(TypeError, lambda: np.array([1]) - td)
expected = pd.to_timedelta(['2 days']).values
self.assert_numpy_array_equal(td * np.array([2]), expected)
self.assert_numpy_array_equal(np.array([2]) * td, expected)
self.assertRaises(TypeError, lambda: td * other)
self.assertRaises(TypeError, lambda: other * td)
self.assert_numpy_array_equal(td / other,
np.array([1], dtype=np.float64))
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other / td,
np.array([1], dtype=np.float64))
# timedelta, datetime
other = pd.to_datetime(['2000-01-01']).values
expected = pd.to_datetime(['2000-01-02']).values
self.assert_numpy_array_equal(td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other + td, expected)
expected = pd.to_datetime(['1999-12-31']).values
self.assert_numpy_array_equal(-td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other - td, expected)
def test_ops_series(self):
# regression test for GH8813
td = Timedelta('1 day')
other = pd.Series([1, 2])
expected = pd.Series(pd.to_timedelta(['1 day', '2 days']))
tm.assert_series_equal(expected, td * other)
tm.assert_series_equal(expected, other * td)
def test_ops_series_object(self):
# GH 13043
s = pd.Series([pd.Timestamp('2015-01-01', tz='US/Eastern'),
pd.Timestamp('2015-01-01', tz='Asia/Tokyo')],
name='xxx')
self.assertEqual(s.dtype, object)
exp = pd.Series([pd.Timestamp('2015-01-02', tz='US/Eastern'),
pd.Timestamp('2015-01-02', tz='Asia/Tokyo')],
name='xxx')
tm.assert_series_equal(s + pd.Timedelta('1 days'), exp)
tm.assert_series_equal(pd.Timedelta('1 days') + s, exp)
# object series & object series
s2 = pd.Series([pd.Timestamp('2015-01-03', tz='US/Eastern'),
pd.Timestamp('2015-01-05', tz='Asia/Tokyo')],
name='xxx')
self.assertEqual(s2.dtype, object)
exp = pd.Series([pd.Timedelta('2 days'), pd.Timedelta('4 days')],
name='xxx')
tm.assert_series_equal(s2 - s, exp)
tm.assert_series_equal(s - s2, -exp)
s = pd.Series([pd.Timedelta('01:00:00'), pd.Timedelta('02:00:00')],
name='xxx', dtype=object)
self.assertEqual(s.dtype, object)
        exp = pd.Series([pd.Timedelta('01:30:00'), pd.Timedelta('02:30:00')],
                        name='xxx')
        tm.assert_series_equal(s + pd.Timedelta('00:30:00'), exp)
        tm.assert_series_equal(pd.Timedelta('00:30:00') + s, exp)
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import os
import operator
import unittest
import cStringIO as StringIO
import nose
from numpy import nan
import numpy as np
import numpy.ma as ma
from pandas import Index, Series, TimeSeries, DataFrame, isnull, notnull
from pandas.core.index import MultiIndex
import pandas.core.datetools as datetools
from pandas.util import py3compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
#-------------------------------------------------------------------------------
# Series test cases
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class CheckNameIntegration(object):
def test_scalarop_preserve_name(self):
result = self.ts * 2
self.assertEquals(result.name, self.ts.name)
def test_copy_name(self):
result = self.ts.copy()
self.assertEquals(result.name, self.ts.name)
# def test_copy_index_name_checking(self):
# # don't want to be able to modify the index stored elsewhere after
# # making a copy
# self.ts.index.name = None
# cp = self.ts.copy()
# cp.index.name = 'foo'
# self.assert_(self.ts.index.name is None)
def test_append_preserve_name(self):
result = self.ts[:5].append(self.ts[5:])
self.assertEquals(result.name, self.ts.name)
def test_binop_maybe_preserve_name(self):
# names match, preserve
result = self.ts * self.ts
self.assertEquals(result.name, self.ts.name)
result = self.ts * self.ts[:-2]
self.assertEquals(result.name, self.ts.name)
# names don't match, don't preserve
cp = self.ts.copy()
cp.name = 'something else'
result = self.ts + cp
self.assert_(result.name is None)
def test_combine_first_name(self):
result = self.ts.combine_first(self.ts[:5])
self.assertEquals(result.name, self.ts.name)
def test_getitem_preserve_name(self):
result = self.ts[self.ts > 0]
self.assertEquals(result.name, self.ts.name)
result = self.ts[[0, 2, 4]]
self.assertEquals(result.name, self.ts.name)
result = self.ts[5:10]
self.assertEquals(result.name, self.ts.name)
def test_multilevel_name_print(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
s = Series(range(0,len(index)), index=index, name='sth')
expected = ["first second",
"foo one 0",
" two 1",
" three 2",
"bar one 3",
" two 4",
"baz two 5",
" three 6",
"qux one 7",
" two 8",
" three 9",
"Name: sth"]
expected = "\n".join(expected)
self.assertEquals(repr(s), expected)
def test_multilevel_preserve_name(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
s = Series(np.random.randn(len(index)), index=index, name='sth')
result = s['foo']
result2 = s.ix['foo']
self.assertEquals(result.name, s.name)
self.assertEquals(result2.name, s.name)
def test_name_printing(self):
# test small series
s = Series([0, 1, 2])
s.name = "test"
self.assert_("Name: test" in repr(s))
s.name = None
self.assert_(not "Name:" in repr(s))
# test big series (diff code path)
s = Series(range(0,1000))
s.name = "test"
self.assert_("Name: test" in repr(s))
s.name = None
self.assert_(not "Name:" in repr(s))
def test_pickle_preserve_name(self):
unpickled = self._pickle_roundtrip(self.ts)
self.assertEquals(unpickled.name, self.ts.name)
def _pickle_roundtrip(self, obj):
obj.save('__tmp__')
unpickled = Series.load('__tmp__')
os.remove('__tmp__')
return unpickled
def test_argsort_preserve_name(self):
result = self.ts.argsort()
self.assertEquals(result.name, self.ts.name)
def test_sort_index_name(self):
result = self.ts.sort_index(ascending=False)
self.assertEquals(result.name, self.ts.name)
def test_to_sparse_pass_name(self):
result = self.ts.to_sparse()
self.assertEquals(result.name, self.ts.name)
class SafeForSparse(object):
pass
class TestSeries(unittest.TestCase, CheckNameIntegration):
def setUp(self):
self.ts = tm.makeTimeSeries()
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
self.objSeries = tm.makeObjectSeries()
self.objSeries.name = 'objects'
self.empty = Series([], index=[])
def test_constructor(self):
# Recognize TimeSeries
self.assert_(isinstance(self.ts, TimeSeries))
# Pass in Series
derived = Series(self.ts)
self.assert_(isinstance(derived, TimeSeries))
self.assert_(tm.equalContents(derived.index, self.ts.index))
# Ensure new index is not created
self.assertEquals(id(self.ts.index), id(derived.index))
# Pass in scalar
scalar = Series(0.5)
self.assert_(isinstance(scalar, float))
# Mixed type Series
mixed = Series(['hello', np.NaN], index=[0, 1])
self.assert_(mixed.dtype == np.object_)
self.assert_(mixed[1] is np.NaN)
self.assert_(not isinstance(self.empty, TimeSeries))
self.assert_(not isinstance(Series({}), TimeSeries))
self.assertRaises(Exception, Series, np.random.randn(3, 3),
index=np.arange(3))
def test_constructor_empty(self):
empty = Series()
empty2 = Series([])
assert_series_equal(empty, empty2)
empty = Series(index=range(10))
empty2 = Series(np.nan, index=range(10))
assert_series_equal(empty, empty2)
def test_constructor_maskedarray(self):
data = ma.masked_all((3,), dtype=float)
result = Series(data)
expected = Series([nan, nan, nan])
assert_series_equal(result, expected)
data[0] = 0.0
data[2] = 2.0
index = ['a', 'b', 'c']
result = Series(data, index=index)
expected = Series([0.0, nan, 2.0], index=index)
assert_series_equal(result, expected)
def test_constructor_default_index(self):
s = Series([0, 1, 2])
assert_almost_equal(s.index, np.arange(3))
def test_constructor_corner(self):
df = tm.makeTimeDataFrame()
objs = [df, df]
s = Series(objs, index=[0, 1])
self.assert_(isinstance(s, Series))
def test_constructor_cast(self):
self.assertRaises(ValueError, Series, ['a', 'b', 'c'], dtype=float)
def test_constructor_dict(self):
d = {'a' : 0., 'b' : 1., 'c' : 2.}
result = Series(d, index=['b', 'c', 'd', 'a'])
expected = Series([1, 2, nan, 0], index=['b', 'c', 'd', 'a'])
assert_series_equal(result, expected)
def test_constructor_list_of_tuples(self):
data = [(1, 1), (2, 2), (2, 3)]
s = Series(data)
self.assertEqual(list(s), data)
def test_constructor_tuple_of_tuples(self):
data = ((1, 1), (2, 2), (2, 3))
s = Series(data)
self.assertEqual(tuple(s), data)
def test_fromDict(self):
data = {'a' : 0, 'b' : 1, 'c' : 2, 'd' : 3}
series = Series(data)
self.assert_(tm.is_sorted(series.index))
data = {'a' : 0, 'b' : '1', 'c' : '2', 'd' : datetime.now()}
series = Series(data)
self.assert_(series.dtype == np.object_)
data = {'a' : 0, 'b' : '1', 'c' : '2', 'd' : '3'}
series = Series(data)
self.assert_(series.dtype == np.object_)
data = {'a' : '0', 'b' : '1'}
series = Series(data, dtype=float)
self.assert_(series.dtype == np.float64)
def test_setindex(self):
# wrong type
series = self.series.copy()
self.assertRaises(TypeError, setattr, series, 'index', None)
# wrong length
series = self.series.copy()
self.assertRaises(AssertionError, setattr, series, 'index',
np.arange(len(series) - 1))
# works
series = self.series.copy()
series.index = np.arange(len(series))
self.assert_(isinstance(series.index, Index))
def test_array_finalize(self):
pass
def test_fromValue(self):
nans = Series(np.NaN, index=self.ts.index)
self.assert_(nans.dtype == np.float_)
self.assertEqual(len(nans), len(self.ts))
strings = Series('foo', index=self.ts.index)
self.assert_(strings.dtype == np.object_)
self.assertEqual(len(strings), len(self.ts))
d = datetime.now()
dates = Series(d, index=self.ts.index)
self.assert_(dates.dtype == np.object_)
self.assertEqual(len(dates), len(self.ts))
def test_contains(self):
tm.assert_contains_all(self.ts.index, self.ts)
def test_pickle(self):
unp_series = self._pickle_roundtrip(self.series)
unp_ts = self._pickle_roundtrip(self.ts)
assert_series_equal(unp_series, self.series)
assert_series_equal(unp_ts, self.ts)
def _pickle_roundtrip(self, obj):
obj.save('__tmp__')
unpickled = Series.load('__tmp__')
os.remove('__tmp__')
return unpickled
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
self.assertEqual(self.series[idx1], self.series.get(idx1))
self.assertEqual(self.objSeries[idx2], self.objSeries.get(idx2))
self.assertEqual(self.series[idx1], self.series[5])
self.assertEqual(self.objSeries[idx2], self.objSeries[5])
self.assert_(self.series.get(-1) is None)
self.assertEqual(self.series[5], self.series.get(self.series.index[5]))
# missing
d = self.ts.index[0] - datetools.bday
self.assertRaises(KeyError, self.ts.__getitem__, d)
def test_iget(self):
s = Series(np.random.randn(10), index=range(0, 20, 2))
for i in range(len(s)):
result = s.iget(i)
exp = s[s.index[i]]
assert_almost_equal(result, exp)
# pass a slice
result = s.iget(slice(1, 3))
expected = s.ix[2:4]
assert_series_equal(result, expected)
def test_getitem_regression(self):
s = Series(range(5), index=range(5))
result = s[range(5)]
assert_series_equal(result, s)
def test_getitem_slice_bug(self):
s = Series(range(10), range(10))
result = s[-12:]
assert_series_equal(result, s)
result = s[-7:]
assert_series_equal(result, s[3:])
result = s[:-12]
assert_series_equal(result, s[:0])
def test_getitem_int64(self):
idx = np.int64(5)
self.assertEqual(self.ts[idx], self.ts[5])
def test_getitem_fancy(self):
slice1 = self.series[[1,2,3]]
slice2 = self.objSeries[[1,2,3]]
self.assertEqual(self.series.index[2], slice1.index[1])
self.assertEqual(self.objSeries.index[2], slice2.index[1])
self.assertEqual(self.series[2], slice1[1])
self.assertEqual(self.objSeries[2], slice2[1])
def test_getitem_boolean(self):
s = self.series
mask = s > s.median()
# passing list is OK
result = s[list(mask)]
expected = s[mask]
assert_series_equal(result, expected)
self.assert_(np.array_equal(result.index, s.index[mask]))
def test_getitem_generator(self):
gen = (x > 0 for x in self.series)
result = self.series[gen]
result2 = self.series[iter(self.series > 0)]
expected = self.series[self.series > 0]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_getitem_boolean_object(self):
# using column from DataFrame
s = self.series
mask = s > s.median()
omask = mask.astype(object)
# getitem
result = s[omask]
expected = s[mask]
assert_series_equal(result, expected)
# setitem
cop = s.copy()
cop[omask] = 5
s[mask] = 5
assert_series_equal(cop, s)
# nans raise exception
omask[5:10] = np.nan
self.assertRaises(Exception, s.__getitem__, omask)
self.assertRaises(Exception, s.__setitem__, omask, 5)
def test_getitem_setitem_boolean_corner(self):
ts = self.ts
mask_shifted = ts.shift(1, offset=datetools.bday) > ts.median()
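        # the shifted mask is not aligned with ts, so both getting and
        # setting with it should raise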
self.assertRaises(Exception, ts.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.__setitem__, mask_shifted, 1)
self.assertRaises(Exception, ts.ix.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.ix.__setitem__, mask_shifted, 1)
def test_getitem_setitem_slice_integers(self):
s = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16])
result = s[:4]
expected = s.reindex([2, 4, 6, 8])
assert_series_equal(result, expected)
s[:4] = 0
self.assert_((s[:4] == 0).all())
self.assert_(not (s[4:] == 0).any())
def test_getitem_out_of_bounds(self):
# don't segfault, GH #495
self.assertRaises(IndexError, self.ts.__getitem__, len(self.ts))
def test_getitem_box_float64(self):
value = self.ts[5]
self.assert_(isinstance(value, np.float64))
def test_getitem_ambiguous_keyerror(self):
s = Series(range(10), index=range(0, 20, 2))
self.assertRaises(KeyError, s.__getitem__, 1)
self.assertRaises(KeyError, s.ix.__getitem__, 1)
def test_setitem_ambiguous_keyerror(self):
s = Series(range(10), index=range(0, 20, 2))
self.assertRaises(KeyError, s.__setitem__, 1, 5)
self.assertRaises(KeyError, s.ix.__setitem__, 1, 5)
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
self.assert_(self.series.index[9] not in numSlice.index)
self.assert_(self.objSeries.index[9] not in objSlice.index)
self.assertEqual(len(numSlice), len(numSlice.index))
self.assertEqual(self.series[numSlice.index[0]],
numSlice[numSlice.index[0]])
self.assertEqual(numSlice.index[1], self.series.index[11])
self.assert_(tm.equalContents(numSliceEnd,
np.array(self.series)[-10:]))
# test return view
sl = self.series[10:20]
sl[:] = 0
self.assert_((self.series[10:20] == 0).all())
def test_slice_can_reorder_not_uniquely_indexed(self):
s = Series(1, index=['a', 'a', 'b', 'b', 'c'])
result = s[::-1] # it works!
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1,2,17]] = np.NaN
self.ts[6] = np.NaN
self.assert_(np.isnan(self.ts[6]))
self.assert_(np.isnan(self.ts[2]))
self.ts[np.isnan(self.ts)] = 5
self.assert_(not np.isnan(self.ts[2]))
# caught this bug when writing tests
series = Series(tm.makeIntIndex(20).astype(float),
index=tm.makeIntIndex(20))
series[::2] = 0
self.assert_((series[::2] == 0).all())
# set item that's not contained
self.assertRaises(Exception, self.series.__setitem__,
'foobar', 1)
def test_set_value(self):
idx = self.ts.index[10]
res = self.ts.set_value(idx, 0)
self.assert_(res is self.ts)
self.assertEqual(self.ts[idx], 0)
res = self.series.set_value('foobar', 0)
self.assert_(res is not self.series)
self.assert_(res.index[-1] == 'foobar')
self.assertEqual(res['foobar'], 0)
def test_setslice(self):
sl = self.ts[5:20]
self.assertEqual(len(sl), len(sl.index))
self.assertEqual(len(sl.index.indexMap), len(sl.index))
def test_basic_getitem_setitem_corner(self):
# invalid tuples, e.g. self.ts[:, None] vs. self.ts[:, 2]
self.assertRaises(Exception, self.ts.__getitem__,
(slice(None, None), 2))
self.assertRaises(Exception, self.ts.__setitem__,
(slice(None, None), 2), 2)
# weird lists. [slice(0, 5)] will work but not two slices
result = self.ts[[slice(None, 5)]]
expected = self.ts[:5]
assert_series_equal(result, expected)
        # but mixing a scalar with a slice should raise
self.assertRaises(Exception, self.ts.__getitem__,
[5, slice(None, None)])
self.assertRaises(Exception, self.ts.__setitem__,
[5, slice(None, None)], 2)
def test_basic_getitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
result = self.ts[indices]
expected = self.ts.reindex(indices)
assert_series_equal(result, expected)
result = self.ts[indices[0]:indices[2]]
expected = self.ts.ix[indices[0]:indices[2]]
assert_series_equal(result, expected)
# integer indexes, be careful
s = Series(np.random.randn(10), index=range(0, 20, 2))
inds = [0, 2, 5, 7, 8]
arr_inds = np.array([0, 2, 5, 7, 8])
result = s[inds]
expected = s.reindex(inds)
assert_series_equal(result, expected)
result = s[arr_inds]
expected = s.reindex(arr_inds)
assert_series_equal(result, expected)
def test_basic_setitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices] = 0
exp.ix[indices] = 0
assert_series_equal(cp, exp)
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices[0]:indices[2]] = 0
exp.ix[indices[0]:indices[2]] = 0
assert_series_equal(cp, exp)
# integer indexes, be careful
s = Series(np.random.randn(10), index=range(0, 20, 2))
inds = [0, 4, 6]
arr_inds = np.array([0, 4, 6])
        cp = s.copy()
        exp = s.copy()
        cp[inds] = 0
        exp.ix[inds] = 0
        assert_series_equal(cp, exp)
        cp = s.copy()
        exp = s.copy()
        cp[arr_inds] = 0
        exp.ix[arr_inds] = 0
        assert_series_equal(cp, exp)
inds_notfound = [0, 4, 5, 6]
arr_inds_notfound = np.array([0, 4, 5, 6])
self.assertRaises(Exception, s.__setitem__, inds_notfound, 0)
self.assertRaises(Exception, s.__setitem__, arr_inds_notfound, 0)
def test_ix_getitem(self):
inds = self.series.index[[3,4,7]]
assert_series_equal(self.series.ix[inds], self.series.reindex(inds))
assert_series_equal(self.series.ix[5::2], self.series[5::2])
# slice with indices
d1, d2 = self.ts.index[[5, 15]]
result = self.ts.ix[d1:d2]
expected = self.ts.truncate(d1, d2)
assert_series_equal(result, expected)
# boolean
mask = self.series > self.series.median()
assert_series_equal(self.series.ix[mask], self.series[mask])
# ask for index value
self.assertEquals(self.ts.ix[d1], self.ts[d1])
self.assertEquals(self.ts.ix[d2], self.ts[d2])
def test_ix_getitem_not_monotonic(self):
d1, d2 = self.ts.index[[5, 15]]
ts2 = self.ts[::2][::-1]
self.assertRaises(KeyError, ts2.ix.__getitem__, slice(d1, d2))
self.assertRaises(KeyError, ts2.ix.__setitem__, slice(d1, d2), 0)
def test_ix_getitem_setitem_integer_slice_keyerrors(self):
s = Series(np.random.randn(10), index=range(0, 20, 2))
# this is OK
cp = s.copy()
cp.ix[4:10] = 0
self.assert_((cp.ix[4:10] == 0).all())
# so is this
cp = s.copy()
cp.ix[3:11] = 0
self.assert_((cp.ix[3:11] == 0).values.all())
result = s.ix[4:10]
result2 = s.ix[3:11]
expected = s.reindex([4, 6, 8, 10])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# non-monotonic, raise KeyError
s2 = s[::-1]
self.assertRaises(KeyError, s2.ix.__getitem__, slice(3, 11))
self.assertRaises(KeyError, s2.ix.__setitem__, slice(3, 11), 0)
def test_ix_getitem_iterator(self):
idx = iter(self.series.index[:10])
result = self.series.ix[idx]
assert_series_equal(result, self.series[:10])
def test_ix_setitem(self):
inds = self.series.index[[3,4,7]]
result = self.series.copy()
result.ix[inds] = 5
expected = self.series.copy()
expected[[3,4,7]] = 5
assert_series_equal(result, expected)
result.ix[5:10] = 10
expected[5:10] = 10
assert_series_equal(result, expected)
# set slice with indices
d1, d2 = self.series.index[[5, 15]]
result.ix[d1:d2] = 6
expected[5:16] = 6 # because it's inclusive
assert_series_equal(result, expected)
# set index value
self.series.ix[d1] = 4
self.series.ix[d2] = 6
self.assertEquals(self.series[d1], 4)
self.assertEquals(self.series[d2], 6)
def test_ix_setitem_boolean(self):
mask = self.series > self.series.median()
result = self.series.copy()
result.ix[mask] = 0
expected = self.series
expected[mask] = 0
assert_series_equal(result, expected)
def test_ix_setitem_corner(self):
inds = list(self.series.index[[5, 8, 12]])
self.series.ix[inds] = 5
self.assertRaises(Exception, self.series.ix.__setitem__,
inds + ['foo'], 5)
def test_get_set_boolean_different_order(self):
ordered = self.series.order()
# setting
copy = self.series.copy()
copy[ordered > 0] = 0
expected = self.series.copy()
expected[expected > 0] = 0
assert_series_equal(copy, expected)
# getting
sel = self.series[ordered > 0]
exp = self.series[self.series > 0]
assert_series_equal(sel, exp)
def test_repr(self):
str(self.ts)
str(self.series)
str(self.series.astype(int))
str(self.objSeries)
str(Series(tm.randn(1000), index=np.arange(1000)))
str(Series(tm.randn(1000), index=np.arange(1000, 0, step=-1)))
# empty
str(self.empty)
# with NaNs
self.series[5:7] = np.NaN
str(self.series)
# tuple name, e.g. from hierarchical index
self.series.name = ('foo', 'bar', 'baz')
repr(self.series)
biggie = Series(tm.randn(1000), index=np.arange(1000),
name=('foo', 'bar', 'baz'))
repr(biggie)
def test_to_string(self):
from cStringIO import StringIO
buf = StringIO()
s = self.ts.to_string()
retval = self.ts.to_string(buf=buf)
self.assert_(retval is None)
self.assertEqual(buf.getvalue().strip(), s)
# pass float_format
format = '%.4f'.__mod__
result = self.ts.to_string(float_format=format)
result = [x.split()[1] for x in result.split('\n')]
expected = [format(x) for x in self.ts]
self.assertEqual(result, expected)
# empty string
result = self.ts[:0].to_string()
self.assertEqual(result, '')
result = self.ts[:0].to_string(length=0)
self.assertEqual(result, '')
# name and length
cp = self.ts.copy()
cp.name = 'foo'
result = cp.to_string(length=True, name=True)
last_line = result.split('\n')[-1].strip()
self.assertEqual(last_line, "Name: foo, Length: %d" % len(cp))
def test_to_string_mixed(self):
s = Series(['foo', np.nan, -1.23, 4.56])
result = s.to_string()
expected = ('0 foo\n'
'1 NaN\n'
'2 -1.23\n'
'3 4.56')
self.assertEqual(result, expected)
# but don't count NAs as floats
s = Series(['foo', np.nan, 'bar', 'baz'])
result = s.to_string()
expected = ('0 foo\n'
'1 NaN\n'
'2 bar\n'
'3 baz')
self.assertEqual(result, expected)
s = Series(['foo', 5, 'bar', 'baz'])
result = s.to_string()
expected = ('0 foo\n'
'1 5\n'
'2 bar\n'
'3 baz')
self.assertEqual(result, expected)
def test_to_string_float_na_spacing(self):
s = Series([0., 1.5678, 2., -3., 4.])
s[::2] = np.nan
result = s.to_string()
expected = ('0 NaN\n'
'1 1.568\n'
'2 NaN\n'
'3 -3.000\n'
'4 NaN')
self.assertEqual(result, expected)
def test_iter(self):
for i, val in enumerate(self.series):
self.assertEqual(val, self.series[i])
for i, val in enumerate(self.ts):
self.assertEqual(val, self.ts[i])
def test_keys(self):
# HACK: By doing this in two stages, we avoid 2to3 wrapping the call
# to .keys() in a list()
getkeys = self.ts.keys
self.assert_(getkeys() is self.ts.index)
def test_values(self):
self.assert_(np.array_equal(self.ts, self.ts.values))
def test_iteritems(self):
for idx, val in self.series.iteritems():
self.assertEqual(val, self.series[idx])
for idx, val in self.ts.iteritems():
self.assertEqual(val, self.ts[idx])
def test_sum(self):
self._check_stat_op('sum', np.sum)
def test_sum_inf(self):
s = Series(np.random.randn(10))
s2 = s.copy()
s[5:8] = np.inf
s2[5:8] = np.nan
assert_almost_equal(s.sum(), s2.sum())
import pandas.core.nanops as nanops
arr = np.random.randn(100, 100).astype('f4')
arr[:, 2] = np.inf
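        # the top-level nansum and the underlying _nansum implementation
        # should agree on rows that contain inf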
res = nanops.nansum(arr, axis=1)
expected = nanops._nansum(arr, axis=1)
assert_almost_equal(res, expected)
def test_mean(self):
self._check_stat_op('mean', np.mean)
def test_median(self):
self._check_stat_op('median', np.median)
# test with integers, test failure
int_ts = TimeSeries(np.ones(10, dtype=int), index=range(10))
self.assertAlmostEqual(np.median(int_ts), int_ts.median())
def test_prod(self):
self._check_stat_op('prod', np.prod)
def test_min(self):
self._check_stat_op('min', np.min, check_objects=True)
def test_max(self):
self._check_stat_op('max', np.max, check_objects=True)
def test_std(self):
alt = lambda x: np.std(x, ddof=1)
self._check_stat_op('std', alt)
def test_var(self):
alt = lambda x: np.var(x, ddof=1)
self._check_stat_op('var', alt)
def test_skew(self):
from scipy.stats import skew
alt =lambda x: skew(x, bias=False)
self._check_stat_op('skew', alt)
def test_argsort(self):
self._check_accum_op('argsort')
argsorted = self.ts.argsort()
self.assert_(issubclass(argsorted.dtype.type, np.integer))
def test_cumsum(self):
self._check_accum_op('cumsum')
def test_cumprod(self):
self._check_accum_op('cumprod')
def _check_stat_op(self, name, alternate, check_objects=False):
from pandas import DateRange
import pandas.core.nanops as nanops
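        # exercises the op with NaNs injected, skipna on/off, all-NaN input,
        # object dtype, and (optionally) DateRange values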
def testit():
f = getattr(Series, name)
# add some NaNs
self.series[5:15] = np.NaN
# skipna or no
self.assert_(notnull(f(self.series)))
self.assert_(isnull(f(self.series, skipna=False)))
# check the result is correct
nona = self.series.dropna()
assert_almost_equal(f(nona), alternate(nona))
allna = self.series * nan
self.assert_(np.isnan(f(allna)))
# dtype=object with None, it works!
s = Series([1, 2, 3, None, 5])
f(s)
# check DateRange
if check_objects:
s = Series(DateRange('1/1/2000', periods=10))
res = f(s)
exp = alternate(s)
self.assertEqual(res, exp)
testit()
try:
import bottleneck as bn
nanops._USE_BOTTLENECK = False
testit()
nanops._USE_BOTTLENECK = True
except ImportError:
pass
def _check_accum_op(self, name):
func = getattr(np, name)
self.assert_(np.array_equal(func(self.ts), func(np.array(self.ts))))
# with missing values
ts = self.ts.copy()
ts[::2] = np.NaN
result = func(ts)[1::2]
expected = func(np.array(ts.valid()))
self.assert_(np.array_equal(result, expected))
def test_round(self):
# numpy.round doesn't preserve metadata, probably a numpy bug,
# re: GH #314
result = np.round(self.ts, 2)
expected = Series(np.round(self.ts.values, 2), index=self.ts.index)
assert_series_equal(result, expected)
self.assertEqual(result.name, self.ts.name)
def test_prod_numpy16_bug(self):
        s = Series([1., 1., 1.], index=range(3))
result = s.prod()
self.assert_(not isinstance(result, Series))
def test_quantile(self):
from scipy.stats import scoreatpercentile
q = self.ts.quantile(0.1)
self.assertEqual(q, scoreatpercentile(self.ts.valid(), 10))
q = self.ts.quantile(0.9)
self.assertEqual(q, scoreatpercentile(self.ts.valid(), 90))
def test_describe(self):
_ = self.series.describe()
_ = self.ts.describe()
def test_describe_objects(self):
s = Series(['a', 'b', 'b', np.nan, np.nan, np.nan, 'c', 'd', 'a', 'a'])
result = s.describe()
expected = Series({'count' : 7, 'unique' : 4,
'top' : 'a', 'freq' : 3}, index=result.index)
assert_series_equal(result, expected)
def test_append(self):
appendedSeries = self.series.append(self.ts)
for idx, value in appendedSeries.iteritems():
if idx in self.series.index:
self.assertEqual(value, self.series[idx])
elif idx in self.ts.index:
self.assertEqual(value, self.ts[idx])
else:
self.fail("orphaned index!")
self.assertRaises(Exception, self.ts.append, self.ts)
def test_append_many(self):
pieces = [self.ts[:5], self.ts[5:10], self.ts[10:]]
result = pieces[0].append(pieces[1:])
assert_series_equal(result, self.ts)
def test_all_any(self):
np.random.seed(12345)
ts = tm.makeTimeSeries()
bool_series = ts > 0
self.assert_(not bool_series.all())
self.assert_(bool_series.any())
def test_operators(self):
series = self.ts
other = self.ts[::2]
def _check_op(other, op, pos_only=False):
left = np.abs(series) if pos_only else series
right = np.abs(other) if pos_only else other
cython_or_numpy = op(left, right)
python = left.combine(right, op)
tm.assert_almost_equal(cython_or_numpy, python)
def check(other):
simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv',
'gt', 'ge', 'lt', 'le']
for opname in simple_ops:
_check_op(other, getattr(operator, opname))
_check_op(other, operator.pow, pos_only=True)
_check_op(other, lambda x, y: operator.add(y, x))
_check_op(other, lambda x, y: operator.sub(y, x))
_check_op(other, lambda x, y: operator.truediv(y, x))
_check_op(other, lambda x, y: operator.floordiv(y, x))
_check_op(other, lambda x, y: operator.mul(y, x))
_check_op(other, lambda x, y: operator.pow(y, x),
pos_only=True)
check(self.ts * 2)
check(self.ts * 0)
check(self.ts[::2])
check(5)
def check_comparators(other):
_check_op(other, operator.gt)
_check_op(other, operator.ge)
_check_op(other, operator.eq)
_check_op(other, operator.lt)
_check_op(other, operator.le)
check_comparators(5)
check_comparators(self.ts + 1)
def test_operators_empty_int_corner(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({'x' : 0.})
# it works!
_ = s1 * s2
    # NumPy limitation =(
# def test_logical_range_select(self):
# np.random.seed(12345)
# selector = -0.5 <= self.ts <= 0.5
# expected = (self.ts >= -0.5) & (self.ts <= 0.5)
# assert_series_equal(selector, expected)
def test_idxmin(self):
# test idxmin
        # _check_stat_op approach cannot be used here because of the isnull check.
# add some NaNs
self.series[5:15] = np.NaN
# skipna or no
self.assertEqual(self.series[self.series.idxmin()], self.series.min())
self.assert_(isnull(self.series.idxmin(skipna=False)))
# no NaNs
nona = self.series.dropna()
self.assertEqual(nona[nona.idxmin()], nona.min())
self.assertEqual(nona.index.values.tolist().index(nona.idxmin()),
nona.values.argmin())
# all NaNs
allna = self.series * nan
self.assert_(isnull(allna.idxmin()))
def test_idxmax(self):
# test idxmax
        # _check_stat_op approach cannot be used here because of the isnull check.
# add some NaNs
self.series[5:15] = np.NaN
# skipna or no
self.assertEqual(self.series[self.series.idxmax()], self.series.max())
self.assert_(isnull(self.series.idxmax(skipna=False)))
# no NaNs
nona = self.series.dropna()
self.assertEqual(nona[nona.idxmax()], nona.max())
self.assertEqual(nona.index.values.tolist().index(nona.idxmax()),
nona.values.argmax())
# all NaNs
allna = self.series * nan
self.assert_(isnull(allna.idxmax()))
def test_operators_date(self):
result = self.objSeries + timedelta(1)
result = self.objSeries - timedelta(1)
def test_operators_corner(self):
series = self.ts
empty = Series([], index=Index([]))
result = series + empty
self.assert_(np.isnan(result).all())
result = empty + Series([], index=Index([]))
self.assert_(len(result) == 0)
# TODO: this returned NotImplemented earlier, what to do?
# deltas = Series([timedelta(1)] * 5, index=np.arange(5))
# sub_deltas = deltas[::2]
# deltas5 = deltas * 5
# deltas = deltas + sub_deltas
# float + int
int_ts = self.ts.astype(int)[:-5]
added = self.ts + int_ts
expected = self.ts.values[:-5] + int_ts.values
self.assert_(np.array_equal(added[:-5], expected))
def test_operators_reverse_object(self):
# GH 56
arr = Series(np.random.randn(10), index=np.arange(10),
dtype=object)
def _check_op(arr, op):
result = op(1., arr)
expected = op(1., arr.astype(float))
assert_series_equal(result.astype(float), expected)
_check_op(arr, operator.add)
_check_op(arr, operator.sub)
_check_op(arr, operator.mul)
_check_op(arr, operator.truediv)
_check_op(arr, operator.floordiv)
def test_series_frame_radd_bug(self):
from pandas.util.testing import rands
import operator
# GH 353
vals = Series([rands(5) for _ in xrange(10)])
result = 'foo_' + vals
expected = vals.map(lambda x: 'foo_' + x)
assert_series_equal(result, expected)
frame = DataFrame({'vals' : vals})
result = 'foo_' + frame
expected = DataFrame({'vals' : vals.map(lambda x: 'foo_' + x)})
tm.assert_frame_equal(result, expected)
# really raise this time
self.assertRaises(TypeError, operator.add, datetime.now(), self.ts)
def test_operators_frame(self):
# rpow does not work with DataFrame
df = DataFrame({'A' : self.ts})
tm.assert_almost_equal(self.ts + self.ts, (self.ts + df)['A'])
tm.assert_almost_equal(self.ts ** self.ts, (self.ts ** df)['A'])
def test_operators_combine(self):
def _check_fill(meth, op, a, b, fill_value=0):
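            # build the expected values by hand: a one-sided NaN is replaced
            # by fill_value, a two-sided NaN stays NaN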
exp_index = a.index.union(b.index)
a = a.reindex(exp_index)
b = b.reindex(exp_index)
amask = isnull(a)
bmask = isnull(b)
exp_values = []
for i in range(len(exp_index)):
if amask[i]:
if bmask[i]:
exp_values.append(nan)
continue
exp_values.append(op(fill_value, b[i]))
elif bmask[i]:
if amask[i]:
exp_values.append(nan)
continue
exp_values.append(op(a[i], fill_value))
else:
exp_values.append(op(a[i], b[i]))
result = meth(a, b, fill_value=fill_value)
expected = Series(exp_values, exp_index)
assert_series_equal(result, expected)
a = Series([nan, 1., 2., 3., nan], index=np.arange(5))
b = Series([nan, 1, nan, 3, nan, 4.], index=np.arange(6))
ops = [Series.add, Series.sub, Series.mul, Series.div]
equivs = [operator.add, operator.sub, operator.mul]
if py3compat.PY3:
equivs.append(operator.truediv)
else:
equivs.append(operator.div)
fillvals = [0, 0, 1, 1]
for op, equiv_op, fv in zip(ops, equivs, fillvals):
result = op(a, b)
exp = equiv_op(a, b)
assert_series_equal(result, exp)
_check_fill(op, equiv_op, a, b, fill_value=fv)
def test_combine_first(self):
values = tm.makeIntIndex(20).values.astype(float)
series = Series(values, index=tm.makeIntIndex(20))
series_copy = series * 2
series_copy[::2] = np.NaN
# nothing used from the input
combined = series.combine_first(series_copy)
self.assert_(np.array_equal(combined, series))
# Holes filled from input
combined = series_copy.combine_first(series)
self.assert_(np.isfinite(combined).all())
self.assert_(np.array_equal(combined[::2], series[::2]))
self.assert_(np.array_equal(combined[1::2], series_copy[1::2]))
# mixed types
index = tm.makeStringIndex(20)
floats = Series(tm.randn(20), index=index)
strings = Series(tm.makeStringIndex(10), index=index[::2])
combined = strings.combine_first(floats)
tm.assert_dict_equal(strings, combined, compare_keys=False)
tm.assert_dict_equal(floats[1::2], combined, compare_keys=False)
# corner case
s = Series([1., 2, 3], index=[0, 1, 2])
result = s.combine_first(Series([], index=[]))
assert_series_equal(s, result)
def test_corr(self):
import scipy.stats as stats
# full overlap
self.assertAlmostEqual(self.ts.corr(self.ts), 1)
# partial overlap
self.assertAlmostEqual(self.ts[:15].corr(self.ts[5:]), 1)
# No overlap
self.assert_(np.isnan(self.ts[::2].corr(self.ts[1::2])))
# all NA
cp = self.ts[:10].copy()
cp[:] = np.nan
self.assert_(isnull(cp.corr(cp)))
A = tm.makeTimeSeries()
B = tm.makeTimeSeries()
result = A.corr(B)
expected, _ = stats.pearsonr(A, B)
self.assertAlmostEqual(result, expected)
def test_corr_rank(self):
import scipy
import scipy.stats as stats
# kendall and spearman
A = tm.makeTimeSeries()
B = tm.makeTimeSeries()
A[-5:] = A[:5]
result = A.corr(B, method='kendall')
expected = stats.kendalltau(A, B)[0]
self.assertAlmostEqual(result, expected)
result = A.corr(B, method='spearman')
expected = stats.spearmanr(A, B)[0]
self.assertAlmostEqual(result, expected)
# these methods got rewritten in 0.8
if int(scipy.__version__.split('.')[1]) < 9:
raise nose.SkipTest
# results from R
A = Series([-0.89926396, 0.94209606, -1.03289164, -0.95445587,
0.76910310, -0.06430576, -2.09704447, 0.40660407,
-0.89926396, 0.94209606])
B = Series([-1.01270225, -0.62210117, -1.56895827, 0.59592943,
-0.01680292, 1.17258718, -1.06009347, -0.10222060,
-0.89076239, 0.89372375])
kexp = 0.4319297
sexp = 0.5853767
self.assertAlmostEqual(A.corr(B, method='kendall'), kexp)
self.assertAlmostEqual(A.corr(B, method='spearman'), sexp)
def test_cov(self):
# full overlap
self.assertAlmostEqual(self.ts.cov(self.ts), self.ts.std()**2)
# partial overlap
self.assertAlmostEqual(self.ts[:15].cov(self.ts[5:]), self.ts[5:15].std()**2)
# No overlap
self.assert_(np.isnan(self.ts[::2].cov(self.ts[1::2])))
# all NA
cp = self.ts[:10].copy()
cp[:] = np.nan
self.assert_(isnull(cp.cov(cp)))
def test_copy(self):
ts = self.ts.copy()
ts[::2] = np.NaN
# Did not modify original Series
self.assertFalse(np.isnan(self.ts[0]))
def test_count(self):
self.assertEqual(self.ts.count(), len(self.ts))
self.ts[::2] = np.NaN
self.assertEqual(self.ts.count(), np.isfinite(self.ts).sum())
def test_value_counts_nunique(self):
s = Series(['a', 'b', 'b', 'b', 'b', 'a', 'c', 'd', 'd', 'a'])
hist = s.value_counts()
expected = Series([4, 3, 2, 1], index=['b', 'a', 'd', 'c'])
assert_series_equal(hist, expected)
self.assertEquals(s.nunique(), 4)
# handle NA's properly
s[5:7] = np.nan
hist = s.value_counts()
expected = s.dropna().value_counts()
assert_series_equal(hist, expected)
s = Series({})
hist = s.value_counts()
expected = Series([])
assert_series_equal(hist, expected)
def test_sort(self):
ts = self.ts.copy()
ts.sort()
self.assert_(np.array_equal(ts, self.ts.order()))
self.assert_(np.array_equal(ts.index, self.ts.order().index))
def test_sort_index(self):
import random
rindex = list(self.ts.index)
random.shuffle(rindex)
random_order = self.ts.reindex(rindex)
sorted_series = random_order.sort_index()
assert_series_equal(sorted_series, self.ts)
# descending
sorted_series = random_order.sort_index(ascending=False)
assert_series_equal(sorted_series,
self.ts.reindex(self.ts.index[::-1]))
def test_order(self):
ts = self.ts.copy()
ts[:5] = np.NaN
vals = ts.values
result = ts.order()
self.assert_(np.isnan(result[-5:]).all())
self.assert_(np.array_equal(result[:-5], np.sort(vals[5:])))
result = ts.order(na_last=False)
self.assert_(np.isnan(result[:5]).all())
self.assert_(np.array_equal(result[5:], np.sort(vals[5:])))
# something object-type
ser = Series(['A', 'B'], [1, 2])
# no failure
ser.order()
# ascending=False
ordered = ts.order(ascending=False)
expected = np.sort(ts.valid().values)[::-1]
assert_almost_equal(expected, ordered.valid().values)
ordered = ts.order(ascending=False, na_last=False)
assert_almost_equal(expected, ordered.valid().values)
def test_rank(self):
from scipy.stats import rankdata
self.ts[::2] = np.nan
self.ts[:10][::3] = 4.
ranks = self.ts.rank()
oranks = self.ts.astype('O').rank()
assert_series_equal(ranks, oranks)
mask = np.isnan(self.ts)
filled = self.ts.fillna(np.inf)
exp = rankdata(filled)
exp[mask] = np.nan
assert_almost_equal(ranks, exp)
def test_from_csv(self):
self.ts.to_csv('_foo')
ts = Series.from_csv('_foo')
assert_series_equal(self.ts, ts)
self.series.to_csv('_foo')
series = Series.from_csv('_foo')
assert_series_equal(self.series, series)
outfile = open('_foo', 'w')
outfile.write('1998-01-01|1.0\n1999-01-01|2.0')
outfile.close()
series = Series.from_csv('_foo',sep='|')
checkseries = Series({datetime(1998,1,1): 1.0, datetime(1999,1,1): 2.0})
assert_series_equal(checkseries, series)
series = Series.from_csv('_foo',sep='|',parse_dates=False)
checkseries = Series({'1998-01-01': 1.0, '1999-01-01': 2.0})
assert_series_equal(checkseries, series)
os.remove('_foo')
def test_to_csv(self):
self.ts.to_csv('_foo')
lines = open('_foo', 'U').readlines()
assert(lines[1] != '\n')
os.remove('_foo')
def test_to_dict(self):
self.assert_(np.array_equal(Series(self.ts.to_dict()), self.ts))
def test_clip(self):
val = self.ts.median()
self.assertEqual(self.ts.clip_lower(val).min(), val)
self.assertEqual(self.ts.clip_upper(val).max(), val)
self.assertEqual(self.ts.clip(lower=val).min(), val)
self.assertEqual(self.ts.clip(upper=val).max(), val)
result = self.ts.clip(-0.5, 0.5)
expected = np.clip(self.ts, -0.5, 0.5)
| assert_series_equal(result, expected) | pandas.util.testing.assert_series_equal |
import os
import pandas as pd
from typing import Union
import data
import numpy as np
def runs()->pd.DataFrame:
"""Get meta data about the runs
Returns:
pd.DataFrame: Meta data for all runs
"""
dir_path = os.path.join(os.path.dirname(data.__file__),'raw')
df_runs = pd.read_csv(os.path.join(dir_path,'runs.csv'), index_col=0)
return df_runs
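# Hedged usage sketch (assumes the packaged data/raw/runs.csv described above is present):
# >>> df_runs = runs()
# >>> df_runs.head()   # one row of metadata per recorded run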
def load_run(id:int, dir_path='../data/raw')->pd.DataFrame:
"""Load time series for one run.
Args:
id (int): id of run
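        dir_path (str): directory holding the per-run csv files (default '../data/raw')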
Returns:
pd.DataFrame: time series as a data frame.
"""
file_name = f'{id}.csv'
file_path = os.path.join(dir_path, file_name)
df = | pd.read_csv(file_path, index_col=0) | pandas.read_csv |
import pandas as pd
import numpy as np
import os
from datetime import datetime
from IPython.display import IFrame,clear_output
# for PDF reading
import textract
import re
import sys
import docx
from difflib import SequenceMatcher
#######################################################################################
def similar(a, b):
return SequenceMatcher(None, a, b).ratio()
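# Illustrative example (hypothetical strings): difflib's ratio() equals
# 2 * matches / (len(a) + len(b)), e.g.
# >>> similar("Portsmouth", "Portsmuth")   # 2 * 9 / 19
# 0.947...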
#######################################################################################
def dms_to_dd(x,as_string=True):
d,m,s = x.split()
result = abs(float(d)) + float(m)/60. + float(s)/3600.
if float(d) < 0:
result = -result
return result
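# Worked example (hypothetical "deg min sec" coordinate string):
# >>> dms_to_dd("-71 03 27")   # 71 + 3/60 + 27/3600, negated because deg < 0
# -71.0575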
#######################################################################################
def convert_state(state):
return {'New Hampshire':'NH','Maine':'ME',
'Massachusetts':'MA','New Hampshire/Maine':'NH'}[state]
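# Note: the combined label 'New Hampshire/Maine' is mapped to 'NH', so
# convert_state('New Hampshire/Maine') returns 'NH'.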
#######################################################################################
def doy_to_date(x, year=2008, jan1=1):
# jan1 is Day 1, usually
#if np.isnan(x):
# return np.nan
#print(x)
result = ( pd.Period(year = year-1, month=12, day=31, freq='D') +
pd.to_timedelta(x+(1-jan1), unit='days') )
return result.strftime('%Y-%m-%d')
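# Example under the default jan1=1 convention (day 1 is January 1):
# >>> doy_to_date(32, year=2008)
# '2008-02-01'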
#######################################################################################
def date_conversion(x, year=None, dateformat='%d-%m-%y'):
# year is Fall Year for date
# default interpretations:
# aaaa-bb-cc : Year/Month/Day
# PROBLEMATIC:
# aa-bb-cc : Month/Day/Year - or Day/Month/Year if aa>12
# Returns string
# Unknown / missing
if np.any([True for i in ['(earliest/latest)', '-999','no data','no response',
'unknown', 'missing', 'unknown', 'unkown','none',
# the following are added for postcard data
# 2021-02-07
'died', 'no res','skip','omit','card not received',
'card not returned', 'moved','nursing home','delete']
if i in str(x).lower()]):
return '-999'
elif (str(x).strip()=='') | (str(x).strip()=='?') | (str(x).strip()=='-?-'):
return '-999'
elif x in ['0',0]:
return '0'
xx = str(x)
if ('+1' in xx) | ('+2' in xx) | ('+3' in xx):
xx = xx.split('+')[0].strip()
outofbounds = False
    if (year is not None) and ((year < 1678) or (year > 2262)):
outofbounds = True
if ((len(xx)==8) | ((len(xx)==10))) & ('-' not in xx) & ('/' not in xx):
#print xx, year
if (xx[-2]=='.') | ((len(xx)==8) & (xx.isdigit())):
xx = '{}-{}-{}'.format(xx[:4],xx[4:6],xx[6:8]) # year, month, day
#print xx, year
try:
if (len(xx)==8 ) & ('-' in xx):
xdt = pd.to_datetime(xx, format=dateformat)
else:
xdt = pd.to_datetime(xx)
d, m, y = xdt.day, xdt.month, xdt.year
except ValueError as e:
if (len(xx)==8) & ('-' in xx):
# mostly a problem if 00-02-28 (i.e., thinking 00 is a month)
if (xx[2]=='-') & (xx[5]=='-'):
xx = '19'+xx
else:
xx = xx+', {}'.format(year)
elif (len(xx)==10)& ('-' in xx) & outofbounds:
if len(xx.split('-')[0]) >2:
y,m, d = (int(i) for i in xx.split('-'))
else:
d,m,y = (int(i) for i in xx.split('-'))
# latest thaw in August; earliest freeze in August
if ((m<=8) & (y== year+1)) | ((m>=8) & (y==year)):
return '{:04d}-{:02d}-{:02d}'.format(y,m,d)
else:
print ('+++++PROBLEM+++++')
print(xx)
xx = xx+', {}'.format(year)
else:
xx = xx+', {}'.format(year)
try:
xdt = | pd.to_datetime(xx) | pandas.to_datetime |
from datetime import datetime
import numpy as np
import pytest
from pandas.core.dtypes.cast import find_common_type, is_dtype_equal
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series
import pandas._testing as tm
class TestDataFrameCombineFirst:
def test_combine_first_mixed(self):
a = Series(["a", "b"], index=range(2))
b = Series(range(2), index=range(2))
f = DataFrame({"A": a, "B": b})
a = Series(["a", "b"], index=range(5, 7))
b = Series(range(2), index=range(5, 7))
g = DataFrame({"A": a, "B": b})
exp = DataFrame({"A": list("abab"), "B": [0, 1, 0, 1]}, index=[0, 1, 5, 6])
combined = f.combine_first(g)
tm.assert_frame_equal(combined, exp)
def test_combine_first(self, float_frame):
# disjoint
head, tail = float_frame[:5], float_frame[5:]
combined = head.combine_first(tail)
reordered_frame = float_frame.reindex(combined.index)
tm.assert_frame_equal(combined, reordered_frame)
assert tm.equalContents(combined.columns, float_frame.columns)
tm.assert_series_equal(combined["A"], reordered_frame["A"])
# same index
fcopy = float_frame.copy()
fcopy["A"] = 1
del fcopy["C"]
fcopy2 = float_frame.copy()
fcopy2["B"] = 0
del fcopy2["D"]
combined = fcopy.combine_first(fcopy2)
assert (combined["A"] == 1).all()
tm.assert_series_equal(combined["B"], fcopy["B"])
tm.assert_series_equal(combined["C"], fcopy2["C"])
tm.assert_series_equal(combined["D"], fcopy["D"])
# overlap
head, tail = reordered_frame[:10].copy(), reordered_frame
head["A"] = 1
combined = head.combine_first(tail)
assert (combined["A"][:10] == 1).all()
# reverse overlap
tail["A"][:10] = 0
combined = tail.combine_first(head)
assert (combined["A"][:10] == 0).all()
# no overlap
f = float_frame[:10]
g = float_frame[10:]
combined = f.combine_first(g)
tm.assert_series_equal(combined["A"].reindex(f.index), f["A"])
tm.assert_series_equal(combined["A"].reindex(g.index), g["A"])
# corner cases
comb = float_frame.combine_first(DataFrame())
tm.assert_frame_equal(comb, float_frame)
comb = DataFrame().combine_first(float_frame)
tm.assert_frame_equal(comb, float_frame)
comb = float_frame.combine_first(DataFrame(index=["faz", "boo"]))
assert "faz" in comb.index
# #2525
df = DataFrame({"a": [1]}, index=[datetime(2012, 1, 1)])
df2 = DataFrame(columns=["b"])
result = df.combine_first(df2)
assert "b" in result
def test_combine_first_mixed_bug(self):
idx = Index(["a", "b", "c", "e"])
ser1 = Series([5.0, -9.0, 4.0, 100.0], index=idx)
ser2 = Series(["a", "b", "c", "e"], index=idx)
ser3 = Series([12, 4, 5, 97], index=idx)
frame1 = DataFrame({"col0": ser1, "col2": ser2, "col3": ser3})
idx = Index(["a", "b", "c", "f"])
ser1 = Series([5.0, -9.0, 4.0, 100.0], index=idx)
ser2 = Series(["a", "b", "c", "f"], index=idx)
ser3 = Series([12, 4, 5, 97], index=idx)
frame2 = DataFrame({"col1": ser1, "col2": ser2, "col5": ser3})
combined = frame1.combine_first(frame2)
assert len(combined.columns) == 5
def test_combine_first_same_as_in_update(self):
# gh 3016 (same as in update)
df = DataFrame(
[[1.0, 2.0, False, True], [4.0, 5.0, True, False]],
columns=["A", "B", "bool1", "bool2"],
)
other = DataFrame([[45, 45]], index=[0], columns=["A", "B"])
result = df.combine_first(other)
tm.assert_frame_equal(result, df)
df.loc[0, "A"] = np.nan
result = df.combine_first(other)
df.loc[0, "A"] = 45
tm.assert_frame_equal(result, df)
def test_combine_first_doc_example(self):
# doc example
df1 = DataFrame(
{"A": [1.0, np.nan, 3.0, 5.0, np.nan], "B": [np.nan, 2.0, 3.0, np.nan, 6.0]}
)
df2 = DataFrame(
{
"A": [5.0, 2.0, 4.0, np.nan, 3.0, 7.0],
"B": [np.nan, np.nan, 3.0, 4.0, 6.0, 8.0],
}
)
result = df1.combine_first(df2)
expected = DataFrame({"A": [1, 2, 3, 5, 3, 7.0], "B": [np.nan, 2, 3, 4, 6, 8]})
tm.assert_frame_equal(result, expected)
def test_combine_first_return_obj_type_with_bools(self):
# GH3552
df1 = DataFrame(
[[np.nan, 3.0, True], [-4.6, np.nan, True], [np.nan, 7.0, False]]
)
df2 = DataFrame([[-42.6, np.nan, True], [-5.0, 1.6, False]], index=[1, 2])
expected = Series([True, True, False], name=2, dtype=bool)
result_12 = df1.combine_first(df2)[2]
tm.assert_series_equal(result_12, expected)
result_21 = df2.combine_first(df1)[2]
tm.assert_series_equal(result_21, expected)
@pytest.mark.parametrize(
"data1, data2, data_expected",
(
(
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[pd.NaT, pd.NaT, pd.NaT],
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
),
(
[pd.NaT, pd.NaT, pd.NaT],
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
),
(
[datetime(2000, 1, 2), pd.NaT, pd.NaT],
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[datetime(2000, 1, 2), datetime(2000, 1, 2), datetime(2000, 1, 3)],
),
(
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[datetime(2000, 1, 2), pd.NaT, pd.NaT],
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
),
),
)
def test_combine_first_convert_datatime_correctly(
self, data1, data2, data_expected
):
# GH 3593
df1, df2 = DataFrame({"a": data1}), DataFrame({"a": data2})
result = df1.combine_first(df2)
expected = DataFrame({"a": data_expected})
tm.assert_frame_equal(result, expected)
def test_combine_first_align_nan(self):
# GH 7509 (not fixed)
dfa = DataFrame([[pd.Timestamp("2011-01-01"), 2]], columns=["a", "b"])
dfb = DataFrame([[4], [5]], columns=["b"])
assert dfa["a"].dtype == "datetime64[ns]"
assert dfa["b"].dtype == "int64"
res = dfa.combine_first(dfb)
exp = DataFrame(
{"a": [pd.Timestamp("2011-01-01"), pd.NaT], "b": [2, 5]},
columns=["a", "b"],
)
tm.assert_frame_equal(res, exp)
assert res["a"].dtype == "datetime64[ns]"
# ToDo: this must be int64
assert res["b"].dtype == "int64"
res = dfa.iloc[:0].combine_first(dfb)
exp = DataFrame({"a": [np.nan, np.nan], "b": [4, 5]}, columns=["a", "b"])
tm.assert_frame_equal(res, exp)
# ToDo: this must be datetime64
assert res["a"].dtype == "float64"
# ToDo: this must be int64
assert res["b"].dtype == "int64"
def test_combine_first_timezone(self):
# see gh-7630
data1 = pd.to_datetime("20100101 01:01").tz_localize("UTC")
df1 = DataFrame(
columns=["UTCdatetime", "abc"],
data=data1,
index=pd.date_range("20140627", periods=1),
)
data2 = pd.to_datetime("20121212 12:12").tz_localize("UTC")
df2 = DataFrame(
columns=["UTCdatetime", "xyz"],
data=data2,
index=pd.date_range("20140628", periods=1),
)
res = df2[["UTCdatetime"]].combine_first(df1)
exp = DataFrame(
{
"UTCdatetime": [
pd.Timestamp("2010-01-01 01:01", tz="UTC"),
pd.Timestamp("2012-12-12 12:12", tz="UTC"),
],
"abc": [pd.Timestamp("2010-01-01 01:01:00", tz="UTC"), pd.NaT],
},
columns=["UTCdatetime", "abc"],
index=pd.date_range("20140627", periods=2, freq="D"),
)
assert res["UTCdatetime"].dtype == "datetime64[ns, UTC]"
assert res["abc"].dtype == "datetime64[ns, UTC]"
tm.assert_frame_equal(res, exp)
# see gh-10567
dts1 = pd.date_range("2015-01-01", "2015-01-05", tz="UTC")
df1 = DataFrame({"DATE": dts1})
dts2 = pd.date_range("2015-01-03", "2015-01-05", tz="UTC")
df2 = DataFrame({"DATE": dts2})
res = df1.combine_first(df2)
tm.assert_frame_equal(res, df1)
assert res["DATE"].dtype == "datetime64[ns, UTC]"
dts1 = pd.DatetimeIndex(
["2011-01-01", "NaT", "2011-01-03", "2011-01-04"], tz="US/Eastern"
)
df1 = DataFrame({"DATE": dts1}, index=[1, 3, 5, 7])
dts2 = pd.DatetimeIndex(
["2012-01-01", "2012-01-02", "2012-01-03"], tz="US/Eastern"
)
df2 = DataFrame({"DATE": dts2}, index=[2, 4, 5])
res = df1.combine_first(df2)
exp_dts = pd.DatetimeIndex(
[
"2011-01-01",
"2012-01-01",
"NaT",
"2012-01-02",
"2011-01-03",
"2011-01-04",
],
tz="US/Eastern",
)
exp = DataFrame({"DATE": exp_dts}, index=[1, 2, 3, 4, 5, 7])
tm.assert_frame_equal(res, exp)
# different tz
dts1 = pd.date_range("2015-01-01", "2015-01-05", tz="US/Eastern")
df1 = DataFrame({"DATE": dts1})
dts2 = pd.date_range("2015-01-03", "2015-01-05")
df2 = DataFrame({"DATE": dts2})
# if df1 doesn't have NaN, keep its dtype
res = df1.combine_first(df2)
tm.assert_frame_equal(res, df1)
assert res["DATE"].dtype == "datetime64[ns, US/Eastern]"
dts1 = pd.date_range("2015-01-01", "2015-01-02", tz="US/Eastern")
df1 = DataFrame({"DATE": dts1})
dts2 = pd.date_range("2015-01-01", "2015-01-03")
df2 = DataFrame({"DATE": dts2})
res = df1.combine_first(df2)
exp_dts = [
pd.Timestamp("2015-01-01", tz="US/Eastern"),
pd.Timestamp("2015-01-02", tz="US/Eastern"),
pd.Timestamp("2015-01-03"),
]
exp = DataFrame({"DATE": exp_dts})
tm.assert_frame_equal(res, exp)
assert res["DATE"].dtype == "object"
def test_combine_first_timedelta(self):
data1 = pd.TimedeltaIndex(["1 day", "NaT", "3 day", "4day"])
df1 = DataFrame({"TD": data1}, index=[1, 3, 5, 7])
data2 = pd.TimedeltaIndex(["10 day", "11 day", "12 day"])
df2 = DataFrame({"TD": data2}, index=[2, 4, 5])
res = df1.combine_first(df2)
exp_dts = pd.TimedeltaIndex(
["1 day", "10 day", "NaT", "11 day", "3 day", "4 day"]
)
exp = DataFrame({"TD": exp_dts}, index=[1, 2, 3, 4, 5, 7])
tm.assert_frame_equal(res, exp)
assert res["TD"].dtype == "timedelta64[ns]"
def test_combine_first_period(self):
data1 = pd.PeriodIndex(["2011-01", "NaT", "2011-03", "2011-04"], freq="M")
df1 = DataFrame({"P": data1}, index=[1, 3, 5, 7])
data2 = pd.PeriodIndex(["2012-01-01", "2012-02", "2012-03"], freq="M")
df2 = DataFrame({"P": data2}, index=[2, 4, 5])
res = df1.combine_first(df2)
exp_dts = pd.PeriodIndex(
["2011-01", "2012-01", "NaT", "2012-02", "2011-03", "2011-04"], freq="M"
)
exp = DataFrame({"P": exp_dts}, index=[1, 2, 3, 4, 5, 7])
tm.assert_frame_equal(res, exp)
assert res["P"].dtype == data1.dtype
# different freq
dts2 = pd.PeriodIndex(["2012-01-01", "2012-01-02", "2012-01-03"], freq="D")
df2 = DataFrame({"P": dts2}, index=[2, 4, 5])
res = df1.combine_first(df2)
exp_dts = [
pd.Period("2011-01", freq="M"),
pd.Period("2012-01-01", freq="D"),
pd.NaT,
pd.Period("2012-01-02", freq="D"),
pd.Period("2011-03", freq="M"),
pd.Period("2011-04", freq="M"),
]
exp = DataFrame({"P": exp_dts}, index=[1, 2, 3, 4, 5, 7])
tm.assert_frame_equal(res, exp)
assert res["P"].dtype == "object"
def test_combine_first_int(self):
# GH14687 - integer series that do no align exactly
df1 = DataFrame({"a": [0, 1, 3, 5]}, dtype="int64")
df2 = DataFrame({"a": [1, 4]}, dtype="int64")
result_12 = df1.combine_first(df2)
expected_12 = DataFrame({"a": [0, 1, 3, 5]})
tm.assert_frame_equal(result_12, expected_12)
result_21 = df2.combine_first(df1)
expected_21 = DataFrame({"a": [1, 4, 3, 5]})
tm.assert_frame_equal(result_21, expected_21)
@pytest.mark.parametrize("val", [1, 1.0])
def test_combine_first_with_asymmetric_other(self, val):
# see gh-20699
df1 = DataFrame({"isNum": [val]})
df2 = DataFrame({"isBool": [True]})
res = df1.combine_first(df2)
exp = DataFrame({"isBool": [True], "isNum": [val]})
tm.assert_frame_equal(res, exp)
def test_combine_first_string_dtype_only_na(self):
# GH: 37519
df = DataFrame({"a": ["962", "85"], "b": [pd.NA] * 2}, dtype="string")
df2 = DataFrame({"a": ["85"], "b": [pd.NA]}, dtype="string")
df.set_index(["a", "b"], inplace=True)
df2.set_index(["a", "b"], inplace=True)
result = df.combine_first(df2)
expected = DataFrame(
{"a": ["962", "85"], "b": [pd.NA] * 2}, dtype="string"
).set_index(["a", "b"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"scalar1, scalar2",
[
(datetime(2020, 1, 1), datetime(2020, 1, 2)),
(pd.Period("2020-01-01", "D"), pd.Period("2020-01-02", "D")),
(pd.Timedelta("89 days"), pd.Timedelta("60 min")),
(pd.Interval(left=0, right=1), pd.Interval(left=2, right=3, closed="left")),
],
)
def test_combine_first_timestamp_bug(scalar1, scalar2, nulls_fixture):
# GH28481
na_value = nulls_fixture
frame = DataFrame([[na_value, na_value]], columns=["a", "b"])
other = DataFrame([[scalar1, scalar2]], columns=["b", "c"])
common_dtype = find_common_type([frame.dtypes["b"], other.dtypes["b"]])
if is_dtype_equal(common_dtype, "object") or frame.dtypes["b"] == other.dtypes["b"]:
val = scalar1
else:
val = na_value
result = frame.combine_first(other)
expected = DataFrame([[na_value, val, scalar2]], columns=["a", "b", "c"])
expected["b"] = expected["b"].astype(common_dtype)
tm.assert_frame_equal(result, expected)
def test_combine_first_timestamp_bug_NaT():
# GH28481
frame = DataFrame([[pd.NaT, pd.NaT]], columns=["a", "b"])
other = DataFrame(
[[datetime(2020, 1, 1), datetime(2020, 1, 2)]], columns=["b", "c"]
)
result = frame.combine_first(other)
expected = DataFrame(
[[pd.NaT, datetime(2020, 1, 1), datetime(2020, 1, 2)]], columns=["a", "b", "c"]
)
tm.assert_frame_equal(result, expected)
def test_combine_first_with_nan_multiindex():
# gh-36562
mi1 = MultiIndex.from_arrays(
[["b", "b", "c", "a", "b", np.nan], [1, 2, 3, 4, 5, 6]], names=["a", "b"]
)
df = DataFrame({"c": [1, 1, 1, 1, 1, 1]}, index=mi1)
mi2 = MultiIndex.from_arrays(
[["a", "b", "c", "a", "b", "d"], [1, 1, 1, 1, 1, 1]], names=["a", "b"]
)
s = | Series([1, 2, 3, 4, 5, 6], index=mi2) | pandas.Series |
from datetime import datetime
import json
import pandas as pd
import iso8601 as iso
from dateutil import tz
import platform
def generate_excel(file_loc, export_loc):
# file_loc = r"C:\Users\user\PycharmProjects\MVIPostToExcel\mission-victory-india.ghost.2020-12-19-15-33-27.json"
ist = tz.gettz("Asia/Calcutta")
curr_dt = datetime.now().astimezone(ist).replace(tzinfo=None)
# export_loc = r"C:\Users\user\PycharmProjects\MVIPostToExcel"
export_name = "MVIGhostExport_" + curr_dt.strftime("%d-%m-%Y_%H-%M-%S") + ".xlsx"
print("Starting Excel Export")
print("Time: " + str(curr_dt))
print("Loading Exported JSON Data File from " + file_loc)
json_file = open(file_loc, "r", encoding="utf8")
print("Loaded Exported JSON Data File Successfully")
print("Creating JSON Object from File")
json_object = json.load(json_file)
print("Created JSON Object from File Successfully")
print("Creating DataFrame with Columns: " + "SNo, " + "Title, " + "Slug, " + "Type, " + "Status, " +
"Created At (IST), " + "Updated At (IST), " + "Published At (IST), " + "Author, " + "Ghost Editor Link")
dataframe = pd.DataFrame(columns=["SNo", "Title", "Slug", "Type", "Status", "Created At (IST)", "Updated At (IST)",
"Published At (IST)", "Author", "Ghost Editor Link"])
summary = | pd.DataFrame(columns=["Export Date (IST)", "Exported Records", "Input JSON Path", "Excel Export Path"]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import pytest
import numpy as np
from pandas.compat import range
import pandas as pd
import pandas.util.testing as tm
# -------------------------------------------------------------------
# Comparisons
class TestFrameComparisons(object):
def test_df_boolean_comparison_error(self):
# GH#4576
# boolean comparisons with a tuple/list give unexpected results
df = pd.DataFrame(np.arange(6).reshape((3, 2)))
# not shape compatible
with pytest.raises(ValueError):
df == (2, 2)
with pytest.raises(ValueError):
df == [2, 2]
def test_df_float_none_comparison(self):
df = pd.DataFrame(np.random.randn(8, 3), index=range(8),
columns=['A', 'B', 'C'])
with pytest.raises(TypeError):
df.__eq__(None)
def test_df_string_comparison(self):
df = pd.DataFrame([{"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}])
mask_a = df.a > 1
tm.assert_frame_equal(df[mask_a], df.loc[1:1, :])
tm.assert_frame_equal(df[-mask_a], df.loc[0:0, :])
mask_b = df.b == "foo"
tm.assert_frame_equal(df[mask_b], df.loc[0:0, :])
tm.assert_frame_equal(df[-mask_b], df.loc[1:1, :])
@pytest.mark.parametrize('opname', ['eq', 'ne', 'gt', 'lt', 'ge', 'le'])
def test_df_flex_cmp_constant_return_types(self, opname):
# GH#15077, non-empty DataFrame
df = pd.DataFrame({'x': [1, 2, 3], 'y': [1., 2., 3.]})
const = 2
result = getattr(df, opname)(const).get_dtype_counts()
tm.assert_series_equal(result, pd.Series([2], ['bool']))
@pytest.mark.parametrize('opname', ['eq', 'ne', 'gt', 'lt', 'ge', 'le'])
def test_df_flex_cmp_constant_return_types_empty(self, opname):
# GH#15077 empty DataFrame
df = pd.DataFrame({'x': [1, 2, 3], 'y': [1., 2., 3.]})
const = 2
empty = df.iloc[:0]
result = getattr(empty, opname)(const).get_dtype_counts()
tm.assert_series_equal(result, pd.Series([2], ['bool']))
@pytest.mark.parametrize('timestamps', [
[pd.Timestamp('2012-01-01 13:00:00+00:00')] * 2,
[pd.Timestamp('2012-01-01 13:00:00')] * 2])
def test_tz_aware_scalar_comparison(self, timestamps):
# Test for issue #15966
df = pd.DataFrame({'test': timestamps})
expected = pd.DataFrame({'test': [False, False]})
tm.assert_frame_equal(df == -1, expected)
# -------------------------------------------------------------------
# Arithmetic
class TestFrameFlexArithmetic(object):
def test_df_add_flex_filled_mixed_dtypes(self):
# GH#19611
dti = pd.date_range('2016-01-01', periods=3)
ser = pd.Series(['1 Day', 'NaT', '2 Days'], dtype='timedelta64[ns]')
df = pd.DataFrame({'A': dti, 'B': ser})
other = pd.DataFrame({'A': ser, 'B': ser})
fill = pd.Timedelta(days=1).to_timedelta64()
result = df.add(other, fill_value=fill)
expected = pd.DataFrame(
{'A': pd.Series(['2016-01-02', '2016-01-03', '2016-01-05'],
dtype='datetime64[ns]'),
'B': ser * 2})
tm.assert_frame_equal(result, expected)
class TestFrameMulDiv(object):
"""Tests for DataFrame multiplication and division"""
# ------------------------------------------------------------------
# Mod By Zero
def test_df_mod_zero_df(self):
# GH#3590, modulo as ints
df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
# this is technically wrong, as the integer portion is coerced to float
# ###
first = pd.Series([0, 0, 0, 0], dtype='float64')
second = pd.Series([np.nan, np.nan, np.nan, 0])
expected = pd.DataFrame({'first': first, 'second': second})
result = df % df
tm.assert_frame_equal(result, expected)
def test_df_mod_zero_array(self):
# GH#3590, modulo as ints
df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
# this is technically wrong, as the integer portion is coerced to float
# ###
first = pd.Series([0, 0, 0, 0], dtype='float64')
second = pd.Series([np.nan, np.nan, np.nan, 0])
expected = pd.DataFrame({'first': first, 'second': second})
# numpy has a slightly different (wrong) treatment
with np.errstate(all='ignore'):
arr = df.values % df.values
result2 = pd.DataFrame(arr, index=df.index,
columns=df.columns, dtype='float64')
result2.iloc[0:3, 1] = np.nan
tm.assert_frame_equal(result2, expected)
def test_df_mod_zero_int(self):
# GH#3590, modulo as ints
df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = df % 0
expected = pd.DataFrame(np.nan, index=df.index, columns=df.columns)
tm.assert_frame_equal(result, expected)
# numpy has a slightly different (wrong) treatment
with np.errstate(all='ignore'):
arr = df.values.astype('float64') % 0
result2 = pd.DataFrame(arr, index=df.index, columns=df.columns)
tm.assert_frame_equal(result2, expected)
def test_df_mod_zero_series_does_not_commute(self):
# GH#3590, modulo as ints
# not commutative with series
df = pd.DataFrame(np.random.randn(10, 5))
ser = df[0]
res = ser % df
res2 = df % ser
assert not res.fillna(0).equals(res2.fillna(0))
# ------------------------------------------------------------------
# Division By Zero
def test_df_div_zero_df(self):
# integer div, but deal with the 0's (GH#9144)
df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = df / df
first = pd.Series([1.0, 1.0, 1.0, 1.0])
second = pd.Series([np.nan, np.nan, np.nan, 1])
expected = pd.DataFrame({'first': first, 'second': second})
tm.assert_frame_equal(result, expected)
def test_df_div_zero_array(self):
# integer div, but deal with the 0's (GH#9144)
df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
first = pd.Series([1.0, 1.0, 1.0, 1.0])
second = pd.Series([np.nan, np.nan, np.nan, 1])
expected = pd.DataFrame({'first': first, 'second': second})
with np.errstate(all='ignore'):
arr = df.values.astype('float') / df.values
result = pd.DataFrame(arr, index=df.index,
columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_df_div_zero_int(self):
# integer div, but deal with the 0's (GH#9144)
df = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = df / 0
expected = pd.DataFrame(np.inf, index=df.index, columns=df.columns)
expected.iloc[0:3, 1] = np.nan
tm.assert_frame_equal(result, expected)
# numpy has a slightly different (wrong) treatment
with np.errstate(all='ignore'):
arr = df.values.astype('float64') / 0
result2 = pd.DataFrame(arr, index=df.index,
columns=df.columns)
tm.assert_frame_equal(result2, expected)
def test_df_div_zero_series_does_not_commute(self):
# integer div, but deal with the 0's (GH#9144)
df = pd.DataFrame(np.random.randn(10, 5))
ser = df[0]
res = ser / df
res2 = df / ser
assert not res.fillna(0).equals(res2.fillna(0))
class TestFrameArithmetic(object):
@pytest.mark.xfail(reason='GH#7996 datetime64 units not converted to nano',
strict=True)
def test_df_sub_datetime64_not_ns(self):
df = pd.DataFrame(pd.date_range('20130101', periods=3))
dt64 = np.datetime64('2013-01-01')
assert dt64.dtype == 'datetime64[D]'
res = df - dt64
expected = pd.DataFrame([pd.Timedelta(days=0), pd.Timedelta(days=1),
pd.Timedelta(days=2)])
tm.assert_frame_equal(res, expected)
@pytest.mark.parametrize('data', [
[1, 2, 3],
[1.1, 2.2, 3.3],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02'), pd.NaT],
['x', 'y', 1]])
@pytest.mark.parametrize('dtype', [None, object])
def test_df_radd_str_invalid(self, dtype, data):
df = | pd.DataFrame(data, dtype=dtype) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 5 18:54:29 2019
@author: suvodeepmajumder
"""
import sys
sys.path.append("..")
from pygit2 import clone_repository
from pygit2 import GIT_SORT_TOPOLOGICAL, GIT_SORT_REVERSE,GIT_MERGE_ANALYSIS_UP_TO_DATE,GIT_MERGE_ANALYSIS_FASTFORWARD,GIT_MERGE_ANALYSIS_NORMAL,GIT_RESET_HARD
from pygit2 import Repository
import shutil,os
import pygit2
from git_log import git2repo
import os
import re
import shlex
import numpy as np
import pandas as pd
from glob2 import glob, iglob
import subprocess as sp
import understand as und
from pathlib import Path
from pdb import set_trace
import sys
from collections import defaultdict
from utils.utils import utils
import platform
from os.path import dirname as up
from multiprocessing import Pool, cpu_count
import threading
from multiprocessing import Queue
from threading import Thread
import random
import string
#from main.utils.utils.utils import printProgressBar
class ThreadWithReturnValue(Thread):
def __init__(self, group=None, target=None, name=None,
args=(), kwargs={}, Verbose=None):
Thread.__init__(self, group, target, name, args, kwargs)
self._return = None
def run(self):
#print(type(self._target))
if self._target is not None:
self._return = self._target(*self._args,
**self._kwargs)
def join(self, *args):
Thread.join(self, *args)
return self._return
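# Minimal usage sketch for ThreadWithReturnValue (illustrative target function,
# not part of the original pipeline):
# worker = ThreadWithReturnValue(target=pow, args=(2, 10))
# worker.start()
# result = worker.join()   # 1024; join() hands back the target's return value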
class MetricsGetter(object):
"""
Generate class, file, function, object oriented metrics for a project.
Parameters
----------
    repo_url: str
    repo_name: str
    repo_lang: str
    code_path: str or pathlib.PosixPath
Notes
-----
The class is designed to run in conjunction with a context manager.
"""
def __init__(self,repo_url,repo_name,repo_lang,code_path):
self.repo_url = repo_url
self.repo_name = repo_name
self.repo_lang = repo_lang
#self.repo_obj = git2repo.git2repo(self.repo_url,self.repo_name)
self.root_dir = code_path
print("root:",self.root_dir)
if platform.system() == 'Darwin' or platform.system() == 'Linux':
self.repo_path = self.root_dir+ '/commit_guru/ingester/CASRepos/git/' + self.repo_name
self.file_path = up(self.root_dir) + '/data/commit_guru/' + self.repo_name + '.csv'
#self.committed_file = up(os.getcwd()) + '/data/committed_files/' + self.repo_name + '_committed_file.pkl'
self.und_file = up(self.root_dir) + '/data/understand_files/' + self.repo_name + '_understand.csv'
else:
self.repo_path = up(os.getcwd()) + '\\temp_repo\\' + self.repo_name
self.file_path = up(os.getcwd()) + '\\data\\commit_guru\\' + self.repo_name + '.pkl'
#self.committed_file = up(os.getcwd()) + '\\data\\committed_files\\' + self.repo_name + '_committed_file.pkl'
self.buggy_clean_pairs = self.read_commits()
#self.buggy_clean_pairs = self.buggy_clean_pairs[0:5]
# Reference current directory, so we can go back after we are done.
self.cwd = Path('/tmp/smajumd3/')
#self.repo = self.clone_repo()
# Generate path to store udb files
#self.udb_path = self.cwd.joinpath(".temp", "udb")
self.udb_path = self.cwd.joinpath("temp", "udb/"+self.repo_name)
# Create a folder to hold the udb files
if not self.udb_path.is_dir():
os.makedirs(self.udb_path)
def read_commits(self):
df = pd.read_csv(self.file_path)
# print(df)
df = df[df['contains_bug'] == True]
        df = df.reset_index(drop=True)
self.commits = []
commits = []
for i in range(df.shape[0]):
try:
committed_files = []
if df.loc[i,'parent_hashes'] == None:
continue
bug_fixing_commit = df.loc[i,'parent_hashes']
bug_existing_commit = df.loc[i,'commit_hash']
files_changed = df.loc[i,'fileschanged']
#print(files_changed)
files_changed = files_changed.split(',')
files_changed = list(filter(('CAS_DELIMITER').__ne__, files_changed))
self.commits.append(bug_existing_commit)
#language = "Python"
language = self.repo_lang
if bug_fixing_commit == None:
print(df.iloc[i,0])
continue
for row in files_changed:
if language == "Java" or language == "C++" or language == "C":
if len(row.split('src/')) == 1:
continue
committed_files.append(row.split('src/')[1].replace('/','.').rsplit('.',1)[0])
elif language == "Python" :
                        committed_files.append(row.replace('/', '.').rsplit('.', 1)[0])
elif language == "Fortran" :
                        committed_files.append(row.replace('/', '.').rsplit('.', 1)[0])
else:
print("Language under construction")
commits.append([bug_existing_commit,bug_fixing_commit,committed_files])
except Exception as e:
print(e)
continue
return commits
def get_defective_pair_metrics(self):
"""
Use the understand tool's API to generate metrics
Notes
-----
        + For every pair of buggy and clean commit hashes, do the following:
            1. Get the diff of the files changed
2. Checkout the snapshot at the buggy commit
3. Compute the metrics of the files in that commit.
4. Next, checkout the snapshot at the clean commit.
5. Compute the metrics of the files in that commit.
"""
metrics_dataframe = pd.DataFrame()
print(len(self.buggy_clean_pairs))
for i in range(len(self.buggy_clean_pairs)):
try:
buggy_hash = self.buggy_clean_pairs[i][0]
clean_hash = self.buggy_clean_pairs[i][1]
files_changed = self.buggy_clean_pairs[i][2]
# if len((files_changed)) == 0:
# continue
print(i,self.repo_name,(buggy_hash, clean_hash))
# Go the the cloned project path
buggy_und_file = self.udb_path.joinpath("{}_{}.udb".format(self.repo_name+buggy_hash, "buggy"))
#print(self.buggy_und_file)
db_buggy = und.open(str(buggy_und_file))
#continue
print((db_buggy.metrics()))
metrics = db_buggy.metric(db_buggy.metrics())
print(metrics)
break
#print("Files",set(files_changed))
for file in db_buggy.ents("Class"):
# print directory name
# print(file,file.longname(), file.kind())
#language = "Python"
language = self.repo_lang
if language == "Java" or language == "C++" or language == "C":
r = re.compile(str(file.longname()))
newlist = list(filter(r.search, list(set(files_changed))))
elif language == "Python" :
if file.library() == "Standard":
continue
temp_str = file.longname().split(".")[-2]
r = re.compile(str(temp_str))
newlist = list(filter(r.search, list(set(files_changed))))
elif language == "Fortran" :
if file.library() == "Standard":
continue
t3 = file.longname()
t7 = file.refs()
t8 = file.ref()
comp = str(file).split(".")[0]
# print("-------Here is the library : ",file.library())
# r = re.compile(str(file.longname()))
# temp_str = file.longname().split(".")[-2]
r = re.compile(comp)
newlist = list(filter(r.search, list(set(files_changed))))
else:
newlist = []
print("Language under construction")
if len(newlist) > 0:
metrics = file.metric(file.metrics())
print(len(file.metrics()))
metrics["commit_hash"] = buggy_hash
metrics["Name"] = file.longname()
metrics["Bugs"] = 1
metrics_dataframe = metrics_dataframe.append(
pd.Series(metrics), ignore_index=True)
else:
metrics = file.metric(file.metrics())
print(len(file.metrics()))
metrics["commit_hash"] = buggy_hash
metrics["Name"] = file.longname()
metrics["Bugs"] = 0
metrics_dataframe = metrics_dataframe.append(
pd.Series(metrics), ignore_index=True)
# Purge und file
db_buggy.close()
                clean_und_file = self.udb_path.joinpath("{}_{}.udb".format(self.repo_name+clean_hash, "clean"))
db_clean = und.open(str(clean_und_file))
for file in db_clean.ents("class"):
# print directory name
#language = "Python"
language = self.repo_lang
if language == "Java" or language == "C++" or language == "C":
r = re.compile(str(file.longname()))
newlist = list(filter(r.search, list(set(files_changed))))
elif language == "Python" :
if file.library() == "Standard":
continue
temp_str = file.longname().split(".")[-2]
r = re.compile(str(temp_str))
newlist = list(filter(r.search, list(set(files_changed))))
elif language == "Fortran" :
if file.library() == "Standard":
continue
t3 = file.longname()
t7 = file.refs()
t8 = file.ref()
comp = str(file).split(".")[0]
# print("-------Here is the library : ",file.library())
# r = re.compile(str(file.longname()))
# temp_str = file.longname().split(".")[-2]
r = re.compile(comp)
newlist = list(filter(r.search, list(set(files_changed))))
else:
newlist = []
print("Language under construction")
if len(newlist) > 0:
metrics = file.metric(file.metrics())
#print(metrics)
metrics["commit_hash"] = clean_hash
metrics["Name"] = file.name()
metrics["Bugs"] = 0
metrics_dataframe = metrics_dataframe.append(
pd.Series(metrics), ignore_index=True)
else:
metrics = file.metric(file.metrics())
#print(metrics)
metrics["commit_hash"] = clean_hash
metrics["Name"] = file.name()
metrics["Bugs"] = 0
metrics_dataframe = metrics_dataframe.append(
| pd.Series(metrics) | pandas.Series |
import os
from flask import jsonify, request
from server import app
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from aif360.sklearn.metrics import disparate_impact_ratio, base_rate, consistency_score
def bias_table(Y, prot_attr=None, instance_type=None):
groups = Y.index.unique(prot_attr)
with np.errstate(divide='ignore', invalid='ignore'):
pct = [Y.xs(g, level=prot_attr).shape[0]/Y.shape[0] for g in groups]
data = [[np.divide(1, disparate_impact_ratio(Y[stage].dropna() == outcome, prot_attr=prot_attr, priv_group=g))
for stage in Y.columns for outcome in Y[stage].unique() if not pd.isna(outcome)]
for g in groups]
pct_name = 'proportion at first stage' if instance_type is None else f'proportion of {instance_type}'
num_stages = len(data[0])
col = pd.MultiIndex.from_tuples([(pct_name, '')]
+ list(zip(['disparate impact']*num_stages, [f'{stage} -> {outcome}' for stage in Y.columns for outcome in Y[stage].unique() if not pd.isna(outcome)])))
table = | pd.DataFrame(np.c_[pct, data], columns=col, index=groups) | pandas.DataFrame |
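# Note on bias_table above (its body is truncated at the DataFrame construction
# in this excerpt): for each group g in Y's protected-attribute index level,
# `pct` is g's share of rows and `data` holds, per stage -> outcome, the value
# 1 / disparate_impact_ratio(...) computed with g as the privileged group; the
# two are laid out side by side under a two-level column index.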
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 25 10:45:35 2020
@author: yashr
"""
import pandas as pd
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Activation,Layer,Lambda
forestfires = pd.read_csv("fireforests.csv")
#As dummy variables are already created, we will remove the month and alsoday columns
forestfires.drop(["month","day"],axis=1,inplace = True)
forestfires["temp"].value_counts()
print(forestfires["temp"].value_counts())
forestfires.isnull().sum()
print(forestfires.isnull().sum())
forestfires.describe()
print(forestfires.describe())
##I am taking small as 0 and large as 1
forestfires.loc[forestfires["temp"]=='small','temp']=0
forestfires.loc[forestfires["temp"]=='large','temp']=1
forestfires["temp"].value_counts()
#Normalization being done.
def norm_func(i):
x = (i-i.min()) / (i.max() - i.min())
return (x)
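# Worked example of the min-max scaling above (illustrative column values):
# a column [2, 4, 10] becomes (i - 2) / (10 - 2) = [0.0, 0.25, 1.0].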
predictors = forestfires.iloc[:,0:28]
target = forestfires.iloc[:,28]
predictors1 = norm_func(predictors)
#data = pd.concat([predictors1,target],axis=1)
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test= train_test_split(predictors1,target, test_size=0.3,stratify = target)
def prep_model(hidden_dim):
model = Sequential()
for i in range(1,len(hidden_dim)-1):
if (i==1):
model.add(Dense(hidden_dim[i],input_dim=hidden_dim[0],activation="relu"))
else:
model.add(Dense(hidden_dim[i],activation="relu"))
model.add(Dense(hidden_dim[-1],kernel_initializer="normal",activation="sigmoid"))
model.compile(loss="binary_crossentropy",optimizer = "rmsprop",metrics = ["accuracy"])
return model
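# Layer layout note: prep_model([28, 50, 40, 20, 1]) (used further below) builds
# Dense(50, input_dim=28) -> Dense(40) -> Dense(20) -> Dense(1, sigmoid), because
# range(1, len(hidden_dim)-1) covers the hidden sizes and the final entry is
# added separately as the sigmoid output layer.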
#y_train = pd.DataFrame(y_train)
first_model = prep_model([28,50,40,20,1])
first_model.fit(np.array(x_train),np.array(y_train),epochs=500)
pred_train = first_model.predict(np.array(x_train))
#Converting the predicted values to series
pred_train = pd.Series([i[0] for i in pred_train])
size = ["small","large"]
pred_train_class = pd.Series(["small"]*361)
pred_train_class[[i>0.5 for i in pred_train]]= "large"
train = pd.concat([x_train,y_train],axis=1)
train["size_category"].value_counts()
#For training data
from sklearn.metrics import confusion_matrix
train["original_class"] = "small"
train.loc[train["size_category"]==1,"original_class"] = "large"
train.original_class.value_counts()
confusion_matrix(pred_train_class,train["original_class"])
np.mean(pred_train_class==pd.Series(train["original_class"]).reset_index(drop=True)) #100%
pd.crosstab(pred_train_class,pd.Series(train["original_class"]).reset_index(drop=True))
#For test data
pred_test = first_model.predict(np.array(x_test))
pred_test = pd.Series([i[0] for i in pred_test])
pred_test_class = pd.Series(["small"]*156)
pred_test_class[[i>0.5 for i in pred_test]] = "large"
test =pd.concat([x_test,y_test],axis=1)
test["original_class"]="small"
test.loc[test["size_category"]==1,"original_class"] = "large"
test["original_class"].value_counts()
np.mean(pred_test_class==pd.Series(test["original_class"]).reset_index(drop=True)) # 85%
confusion_matrix(pred_test_class,test["original_class"])
pd.crosstab(pred_test_class, | pd.Series(test["original_class"]) | pandas.Series |
import pandas as pd
from intake_dal.dal_catalog import DalCatalog
"""
Test setup:
- batch storage mode driver is parquet and '{{ CATALOG_DIR }}/data/user_events.parquet' has ONLY 1 row
- local storage mode driver is csv and '{{ CATALOG_DIR }}/data/user_events.csv' has TWO rows
"""
def test_dal_catalog_default_storage_parameter(cat):
# cat default is local -> csv plugin
assert cat.entity.user.user_events().read().head().shape[0] == 2
assert cat.entity.user.user_events(storage_mode="local").read().head().shape[0] == 2
assert cat.entity.user.user_events(storage_mode="batch").read().head().shape[0] == 1
assert cat.entity.user.user_events(storage_mode="in_mem").read().head().shape[0] == 4
assert (
cat.entity.user.user_events(storage_mode="local_test", data_path="data").read().head().shape[0] == 2
)
df = | pd.DataFrame({"key": ["a", "first"], "value": [3, 42]}) | pandas.DataFrame |
#!/usr/bin/env python
import matplotlib as mpl
mpl.rcParams['figure.dpi'] = 300
import matplotlib.pyplot as plt
import seaborn as sns
import pysam
from pysamiterators import CachedFasta, MatePairIterator
# Molecule modules:
from singlecellmultiomics.molecule import TranscriptMolecule, MoleculeIterator
from singlecellmultiomics.fragment import SingleEndTranscriptFragment
from singlecellmultiomics.features import FeatureContainer
# Conversion modules:
from singlecellmultiomics.variants.substitutions import conversion_dict_stranded
from singlecellmultiomics.variants import substitution_plot, vcf_to_position_set
from singlecellmultiomics.utils import reverse_complement, complement
from collections import defaultdict, Counter
from singlecellmultiomics.utils import is_main_chromosome
from singlecellmultiomics.bamProcessing import sorted_bam_file, merge_bams
from scipy import stats
from multiprocessing import Pool
import os
import argparse
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
import pickle
import gzip
from uuid import uuid4
def substitution_plot_stranded(pattern_counts: dict,
figsize: tuple = (12, 4),
conversion_colors: tuple = ('b', 'k', 'r', 'grey', 'g', 'pink','b','k','r','k','w','g'),
ylabel: str = '# conversions per molecule',
add_main_group_labels: bool = True,
ax=None,fig=None,
**plot_args
):
"""
Create 3bp substitution plot
Args:
pattern_counts(OrderedDict) : Dictionary containing the substitutions to plot.
Use variants.vcf_to_variant_contexts to create it.
Format:
```OrderedDict([(('ACA', 'A'), 0),
(('ACC', 'A'), 1),
(('ACG', 'A'), 0),
...
(('TTG', 'G'), 0),
(('TTT', 'G'), 0)])```
figsize(tuple) : size of the figure to create
conversion_colors(tuple) : colors to use for the conversion groups
ylabel(str) : y axis label
add_main_group_labels(bool) : Add conversion group labels to top of plot
**plot_args : Additional argument to pass to .plot()
Returns
fig : handle to the figure
ax : handle to the axis
Example:
>>> from singlecellmultiomics.variants import vcf_to_variant_contexts, substitution_plot
>>> import matplotlib.pyplot as plt
>>> pobs = vcf_to_variant_contexts('variants.vcf.gz', 'reference.fasta')
>>> for sample, conversions in pobs.items():
>>> fig, ax = substitution_plot(conversions)
>>> ax.set_title(sample)
>>> plt.show()
"""
conversions_single_nuc = ('AC', 'AG', 'AT', 'CA', 'CG', 'CT', 'GA', 'GC', 'GT', 'TA', 'TC', 'TG')
# Colors for the conversion groups:
color_d = dict(zip(conversions_single_nuc, conversion_colors))
colors = [color_d.get(f'{context[1]}{to}') for context, to in pattern_counts.keys()]
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
substitution_dataframe = pd.DataFrame(pattern_counts.values(), index=list(pattern_counts.keys())).T
substitution_dataframe.plot(kind='bar', color=colors, legend=False, width=1.0, ax=ax, edgecolor='k', **plot_args)
offset = (1 / len(pattern_counts)) * 0.5 # Amount of distance for a half bar
# Add 3bp context ticks:
ax.set_xticks(np.linspace(-0.5 + offset, 0.5 - offset, len(pattern_counts)))
ax.set_xticklabels( [context for context, to in pattern_counts.keys()], rotation=90, size=5)
ax.set_ylabel(ylabel)
ax.set_xlim((-0.5, 0.5))
sns.despine()
if add_main_group_labels:
for i, (u, v) in enumerate(conversions_single_nuc):
ax.text( # position text relative to Axes
(i + 0.5) / len(conversions_single_nuc), 1.0, f'{u}>{v}', fontsize=8,
ha='center', va='top',
transform=ax.transAxes,bbox=dict(facecolor='white', alpha=1,lw=0)
)
return fig, ax
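# Usage note: the function is called once per library further below, e.g.
# substitution_plot_stranded(conversions, fig=fig, ax=ax,
#                            ylabel='conversions seen per molecule'),
# where `conversions` maps (3bp context, substituted base) to a per-molecule
# rate built via conversion_dict_stranded.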
if __name__=='__main__':
argparser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Assign molecules')
argparser.add_argument('bamin', type=str, help='Input BAM file')
argparser.add_argument('-o', type=str, help="output bam file (.bam)", required=True)
argparser.add_argument('-reference', type=str, help="Reference_path (.fasta)", required=True)
argparser.add_argument('-known', type=str, help="Known variants (vcf)", required=True)
argparser.add_argument('-exons', type=str, help="exons (gtf.gz)", required=True)
argparser.add_argument('-introns', type=str, help="introns (gtf.gz)", required=True)
argparser.add_argument('--R2_based', help="The input is only R2 sequences, the molcule mapping direction will be inverted", action='store_true')
argparser.add_argument('-temp_dir', type=str, help="scmo_temp", default=str(uuid4()))
argparser.add_argument('-tagthreads', type=int, help="Amount of threads used (int)", required=True)
args = argparser.parse_args()
single_cell_bam_path = args.bamin
reference_path = args.reference
# Load known variation, to ignore for mut-spectrum
known_vcf_path = args.known
# Paths to gene models
exons_gtf_path = args.exons
introns_gtf_path = args.introns
# Write a tagged bam file to this path:
tagged_output_path = args.o
#####
def obtain_conversions(contig : str):
""" Create conversion dictionary for the suppled contig
Args:
contig (str)
Returns:
conversions_per_library (defaultdict( conversion_dict_stranded ) ) : Per library conversion dictionary
n_molecules_per_library (Counter) : observed molecules per library
contig(str) : the contig passed to the method
temp_bam_path(str) : path to tagged bam file, tagged with gene annotations and 4su mutation count
"""
conversions_per_library = defaultdict( conversion_dict_stranded )
n_molecules_per_library = Counter()
from singlecellmultiomics.molecule import might_be_variant
# Create temp directory to write tagged bam file to:
temp_dir = args.temp_dir
temp_bam_path = f'{temp_dir}/{contig}.bam'
if not os.path.exists(temp_dir):
try:
os.makedirs(temp_dir)
except Exception as e:
pass
# Load gene annotations for the selected contig:
transcriptome_features = FeatureContainer()
transcriptome_features.loadGTF(
path=exons_gtf_path,
select_feature_type=['exon'],
identifierFields=(
'exon_id',
'gene_id'),
store_all=True,
contig=contig,
head=None)
transcriptome_features.loadGTF(
path=introns_gtf_path,
select_feature_type=['intron'],
identifierFields=['transcript_id'],
store_all=True,
contig=contig,
head=None)
colormap = plt.get_cmap('RdYlBu_r')
colormap.set_bad((0,0,0))
read_groups = {}
try:
with pysam.AlignmentFile(single_cell_bam_path, threads=4) as alignments, \
pysam.VariantFile(known_vcf_path) as known, \
sorted_bam_file(temp_bam_path, origin_bam=single_cell_bam_path, read_groups=read_groups, fast_compression=True) as out, \
pysam.FastaFile(reference_path) as reference_handle:
# Cache the sequence of the contig: (faster)
reference = CachedFasta(reference_handle)
for n_molecules, molecule in enumerate(MoleculeIterator(alignments,
TranscriptMolecule,
SingleEndTranscriptFragment,
fragment_class_args = {
'stranded':True,
'features':transcriptome_features
},
molecule_class_args={
'reference':reference,
'features':transcriptome_features,
'auto_set_intron_exon_features': True
}, contig=contig
)):
# Read out mut spectrum
consensus = molecule.get_consensus()
if args.R2_based:
                        molecule.strand = not molecule.strand # Invert because it's R2 based.
n_molecules_per_library[molecule.library] += 1
n_4su_mutations = 0
n_4su_contexts = 0
for (chrom,pos), base in consensus.items():
context = reference.fetch(chrom, pos-1, pos+2).upper()
if len(context)!=3:
continue
if ( (context[1]=='A' and not molecule.strand) or (context[1]=='T' and molecule.strand) ) :
n_4su_contexts+=1
# Check if the base matches or the refence contains N's
if context[1]==base or 'N' in context or len(context)!=3:
continue
# Ignore germline variants:
if might_be_variant(chrom, pos, known):
continue
if not molecule.strand: # reverse template
context = reverse_complement(context)
base = complement(base)
# Count 4SU specific mutations, and write to molecule later
if context[1]=='T' and base=='C':
n_4su_mutations+=1
conversions_per_library[molecule.library][(context, base)] += 1
# Write 4su modification to molecule
molecule.set_meta('4S',n_4su_mutations)
molecule.set_meta('4c',n_4su_contexts)
# Set read color based on conversion rate:
try:
# The max color value will be 10% modification rate
cfloat = colormap( np.clip( 10*(n_4su_mutations/n_4su_contexts),0,1) )[:3]
except Exception as e:
cfloat = colormap._rgba_bad[:3]
molecule.set_meta('YC', '%s,%s,%s' % tuple((int(x * 255) for x in cfloat)))
molecule.set_meta('4c',n_4su_contexts)
molecule.write_tags()
for fragment in molecule:
rgid = fragment.get_read_group()
if not rgid in read_groups:
read_groups[rgid] = fragment.get_read_group(True)[1]
# Write tagged molecule to output file
molecule.write_pysam(out)
except KeyboardInterrupt:
# This allows you to cancel the analysis (CTRL+C) and get the current result
pass
return conversions_per_library, n_molecules_per_library, contig, temp_bam_path
n_molecules_per_library = Counter()
with Pool(args.tagthreads) as workers:
conversions_per_library = defaultdict( conversion_dict_stranded ) # library : (context, query) : obs (int)
# Obtain all contigs from the input bam file, exclude scaffolds:
with pysam.AlignmentFile(single_cell_bam_path) as alignments:
contigs = [contig for contig in alignments.references if is_main_chromosome(contig) and contig not in ['MT','Y'] ]
# Run conversion detection on all contigs in parallel:
tagged_bams = []
for conversions_for_contig, \
n_molecules_for_contig_per_lib, \
contig, \
temp_tagged_bam in workers.imap_unordered(obtain_conversions, contigs):
# Merge conversion dictionary:
for library, library_convs in conversions_for_contig.items():
for context, observations in library_convs.items():
conversions_per_library[library][context] += observations
n_molecules_per_library+=n_molecules_for_contig_per_lib
print(f'finished {contig} ', end='\r')
tagged_bams.append(temp_tagged_bam)
# Merge:
print(f'Merging ', end='\r')
merge_bams(tagged_bams, tagged_output_path)
# Normalize observed counts to the amount of molecules we saw:
for library, library_convs in conversions_per_library.items():
for context, observations in library_convs.items():
library_convs[context] = observations / n_molecules_per_library[library]
try:
fig, axes = plt.subplots(len(conversions_per_library),1, figsize=(16,4*(len(conversions_per_library))), sharey=True )
if len(conversions_per_library)==1:
axes = [axes]
for ax, (library, conversions) in zip(axes,conversions_per_library.items()):
substitution_plot_stranded(conversions,fig=fig, ax=ax,ylabel='conversions seen per molecule')
ax.set_axisbelow(True)
ax.grid(axis='y')
ax.set_title(f'{library}, {n_molecules_per_library[library]} molecules')
fig.tight_layout(pad=3.0)
plt.savefig(tagged_output_path.replace('.bam','conversions.png'))
except Exception as e:
print(e)
# Count amount of 4sU conversions per cell, per gene
def listionary():
return defaultdict(list)
expression_per_cell_per_gene = defaultdict(Counter) # gene -> cell -> obs
four_su_per_cell_per_gene = defaultdict(listionary ) # cell -> gene -> [] 4_su observation counts per molecule
four_su_per_gene_per_cell = defaultdict(listionary ) # gene -> cell -> [] 4_su observation counts per molecule
with pysam.AlignmentFile(tagged_output_path) as reads:
for R1,R2 in MatePairIterator(reads):
for read in (R1,R2): # Count every fragment only once by selecting one of the two reads.
if read is not None:
break
if read.has_tag('gn'):
gene = read.get_tag('gn')
elif read.has_tag('GN'):
gene = read.get_tag('GN')
else:
continue
if read.is_duplicate:
continue
cell = read.get_tag('SM')
foursu = read.get_tag('4S')
foursu_contexts = read.get_tag('4c')
library = read.get_tag('LY')
cell = cell.split('_')[1] # Remove library part
expression_per_cell_per_gene[gene][(library,cell)] += 1
if foursu_contexts>0:
four_su_per_gene_per_cell[gene][(library,cell)].append(foursu/foursu_contexts)
four_su_per_cell_per_gene[(library,cell)][gene].append(foursu/foursu_contexts)
assert not (foursu>0 and foursu_contexts==0)
# Store these dictionaries to disk
with gzip.open( tagged_output_path.replace('.bam','4sU_per_gene_per_cell.dict.pickle.gz'),'wb' ) as o:
pickle.dump(four_su_per_gene_per_cell, o)
with gzip.open( tagged_output_path.replace('.bam','4sU_per_cell_per_gene.dict.pickle.gz'),'wb' ) as o:
pickle.dump(four_su_per_cell_per_gene, o)
with gzip.open( tagged_output_path.replace('.bam','expression_per_cell_per_gene.pickle.gz'),'wb' ) as o:
pickle.dump(expression_per_cell_per_gene, o)
four_su_per_gene_per_cell_mean = defaultdict(dict)
four_su_per_gene_per_cell_total= defaultdict(dict)
for gene in four_su_per_gene_per_cell:
for cell, fsu_obs in four_su_per_gene_per_cell[gene].items():
four_su_per_gene_per_cell_mean[gene][cell] = np.mean(fsu_obs)
four_su_per_gene_per_cell_total[gene][cell] = np.sum( np.array(fsu_obs)>0 )
four_su_per_gene_per_cell_mean = pd.DataFrame(four_su_per_gene_per_cell_mean).T
four_su_per_gene_per_cell_total = pd.DataFrame(four_su_per_gene_per_cell_total).T
four_su_per_gene_per_cell_mean.to_csv(tagged_output_path.replace('.bam','4sU_labeled_ratio.csv.gz'))
expression_matrix = pd.DataFrame(four_su_per_gene_per_cell)
import datetime
from unittest import TestCase
import numpy as np
import pandas as pd
from mlnext import pipeline
class TestColumnSelector(TestCase):
def setUp(self):
data = np.arange(8).reshape(-1, 2)
cols = ['a', 'b']
self.df = pd.DataFrame(data, columns=cols)
def test_select_columns(self):
t = pipeline.ColumnSelector(keys=['a'])
expected = self.df.loc[:, ['a']]
result = t.fit_transform(self.df)
pd.testing.assert_frame_equal(result, expected)
class TestColumnDropper(TestCase):
def setUp(self):
data = np.arange(8).reshape(-1, 2)
cols = ['a', 'b']
self.df = pd.DataFrame(data, columns=cols)
def test_drop_columns(self):
t = pipeline.ColumnDropper(columns=['b'])
expected = self.df.loc[:, ['a']]
result = t.fit_transform(self.df)
pd.testing.assert_frame_equal(result, expected)
def test_drop_columns_verbose(self):
t = pipeline.ColumnDropper(columns=['b'], verbose=True)
expected = self.df.loc[:, ['a']]
result = t.transform(self.df)
pd.testing.assert_frame_equal(result, expected)
def test_drop__missing_columns(self):
t = pipeline.ColumnDropper(columns=['c'])
with self.assertWarns(Warning):
t.transform(self.df)
class TestColumnRename(TestCase):
def test_rename_columns(self):
t = pipeline.ColumnRename(lambda x: x.split('.')[-1])
df = pd.DataFrame(columns=['a.b.c', 'd.e.f'])
expected = pd.DataFrame(columns=['c', 'f'])
result = t.fit_transform(df)
pd.testing.assert_frame_equal(result, expected)
class TestNaDropper(TestCase):
def test_drop_na(self):
t = pipeline.NaDropper()
df = pd.DataFrame([1, 0, pd.NA])
expected = pd.DataFrame([1, 0], dtype=object)
result = t.fit_transform(df)
pd.testing.assert_frame_equal(result, expected)
class TestClip(TestCase):
def test_clip(self):
t = pipeline.Clip(lower=0.5, upper=1.5)
df = pd.DataFrame([[0.1, 0.4, 0.6, 0.8, 1.2, 1.5]])
expected = pd.DataFrame([[0.5, 0.5, 0.6, 0.8, 1.2, 1.5]])
result = t.fit_transform(df)
pd.testing.assert_frame_equal(result, expected)
class TestDatetimeTransformer(TestCase):
# FIXME: fails in gitlab pipeline but succeeds locally
def test_datetime(self):
t = pipeline.DatetimeTransformer(columns=['time'])
df = pd.DataFrame([['2021-01-04 14:12:31']], columns=['time'])
expected = pd.DataFrame([[datetime.datetime(2021, 1, 4, 14, 12, 31)]],
columns=['time'])
result = t.fit_transform(df)
pd.testing.assert_frame_equal(result, expected)
def test_datetime_missing_cols(self):
t = pipeline.DatetimeTransformer(columns=['t'])
df = pd.DataFrame([['2021-01-04 14:12:31']], columns=['time'])
with self.assertRaises(ValueError):
t.fit_transform(df)
class TestNumericTransformer(TestCase):
# FIXME: fails in gitlab pipeline but succeeds locally
def test_numeric(self):
t = pipeline.NumericTransformer(columns=['1'])
df = pd.DataFrame([0, 1], columns=['1'], dtype=object)
expected = pd.DataFrame([0, 1], columns=['1'], dtype=np.int64)
result = t.fit_transform(df)
pd.testing.assert_frame_equal(result, expected)
def test_numeric_missing_column(self):
t = pipeline.NumericTransformer(columns=['2'])
df = pd.DataFrame([0, 1], columns=['1'], dtype=object)
with self.assertRaises(ValueError):
t.fit_transform(df)
def test_numeric_additional_column(self):
t = pipeline.NumericTransformer(columns=['2'])
df = pd.DataFrame([[0, 1]], columns=['1', '2'], dtype=object)
expected = pd.DataFrame([[0, 1]], columns=['1', '2'], dtype=object)
expected['2'] = expected['2'].apply(pd.to_numeric)
result = t.fit_transform(df)
pd.testing.assert_frame_equal(result, expected)
def test_numeric_multiple_column(self):
t = pipeline.NumericTransformer(columns=['1', '2'])
df = pd.DataFrame([[0, 1]], columns=['1', '2'], dtype=object)
expected = pd.DataFrame([[0, 1]], columns=['1', '2'])
result = t.fit_transform(df)
pd.testing.assert_frame_equal(result, expected)
def test_numeric_all_column(self):
t = pipeline.NumericTransformer()
df = pd.DataFrame([[0, 1]], columns=['1', '2'], dtype=object)
expected = pd.DataFrame([[0, 1]], columns=['1', '2'])
result = t.fit_transform(df)
pd.testing.assert_frame_equal(result, expected)
class TestTimeframeExtractor(TestCase):
def setUp(self):
self.dates = [datetime.datetime(2021, 10, 1, 9, 50, 0),
datetime.datetime(2021, 10, 1, 10, 0, 0),
datetime.datetime(2021, 10, 1, 11, 0, 0),
datetime.datetime(2021, 10, 1, 12, 0, 0),
datetime.datetime(2021, 10, 1, 12, 10, 0)]
self.values = np.arange(len(self.dates))
self.df = pd.DataFrame(zip(self.dates, self.values),
columns=['time', 'value'])
def test_timeframe_extractor(self):
t = pipeline.TimeframeExtractor(
time_column='time', start_time=datetime.time(10, 0, 0),
end_time=datetime.time(12, 0, 0), verbose=True)
expected = pd.DataFrame(zip(self.dates[1:-1], np.arange(1, 4)),
columns=['time', 'value'])
result = t.fit_transform(self.df)
pd.testing.assert_frame_equal(result, expected)
def test_timeframe_extractor_invert(self):
t = pipeline.TimeframeExtractor(
time_column='time', start_time=datetime.time(10, 0, 0),
end_time=datetime.time(12, 0, 0), invert=True)
expected = pd.DataFrame(zip([self.dates[0], self.dates[-1]],
np.array([0, 4])),
columns=['time', 'value'])
result = t.fit_transform(self.df)
pd.testing.assert_frame_equal(result, expected)
class TestDateExtractor(TestCase):
def setUp(self):
self.dates = [datetime.datetime(2021, 10, 1, 9, 50, 0),
datetime.datetime(2021, 10, 2, 10, 0, 0),
datetime.datetime(2021, 10, 3, 11, 0, 0),
datetime.datetime(2021, 10, 4, 12, 0, 0),
datetime.datetime(2021, 10, 5, 12, 10, 0)]
self.values = np.arange(len(self.dates))
self.df = pd.DataFrame(zip(self.dates, self.values),
columns=['date', 'value'])
def test_date_extractor(self):
t = pipeline.DateExtractor(
date_column='date', start_date=datetime.date(2021, 10, 2),
end_date=datetime.date(2021, 10, 4), verbose=True)
expected = pd.DataFrame(zip(self.dates[1:-1], np.arange(1, 4)),
columns=['date', 'value'])
result = t.fit_transform(self.df)
pd.testing.assert_frame_equal(result, expected)
def test_date_extractor_invert(self):
t = pipeline.DateExtractor(
date_column='date', start_date=datetime.date(2021, 10, 2),
end_date=datetime.date(2021, 10, 4), invert=True)
expected = pd.DataFrame(zip([self.dates[0], self.dates[-1]],
np.array([0, 4])),
columns=['date', 'value'])
result = t.fit_transform(self.df)
pd.testing.assert_frame_equal(result, expected)
class TestValueMapper(TestCase):
def test_value_mapper_one_column(self):
t = pipeline.ValueMapper(columns=['b'], classes={2.0: 1.0})
df = pd.DataFrame(np.ones((3, 2)) * 2, columns=['a', 'b'])
expected = pd.DataFrame(zip(np.ones((3, 1)) * 2, np.ones((3, 1))),
columns=['a', 'b'], dtype=np.float64)
result = t.fit_transform(df)
pd.testing.assert_frame_equal(result, expected)
def test_value_mapper_all_columns(self):
t = pipeline.ValueMapper(columns=['a', 'b'], classes={2.0: 1.0})
df = pd.DataFrame(np.ones((3, 2)) * 2, columns=['a', 'b'])
expected = pd.DataFrame(np.ones((3, 2)), columns=['a', 'b'],
dtype=np.float64)
result = t.fit_transform(df)
pd.testing.assert_frame_equal(result, expected)
def test_value_mapper_missing_value(self):
t = pipeline.ValueMapper(columns=['a', 'b'], classes={2.0: 1.0})
df = pd.DataFrame(np.ones((3, 2)), columns=['a', 'b'])
expected = pd.DataFrame(np.ones((3, 2)), columns=['a', 'b'],
dtype=np.float64)
with self.assertWarns(Warning):
result = t.fit_transform(df)
pd.testing.assert_frame_equal(result, expected)
class TestSorter(TestCase):
def setUp(self):
self.df = pd.DataFrame({
'a': [2, 3, 1, 4],
'b': ['A', 'D', 'C', 'B']
})
def test_sorter(self):
t = pipeline.Sorter(columns=['a'])
result = t.fit_transform(self.df)
expected = self.df.copy().sort_values(by=['a'])
pd.testing.assert_frame_equal(result, expected)
def test_sorter_multi_col(self):
t = pipeline.Sorter(columns=['a', 'b'])
result = t.fit_transform(self.df)
expected = self.df.copy().sort_values(by=['a', 'b'])
pd.testing.assert_frame_equal(result, expected)
class TestFill(TestCase):
def setUp(self):
self.df = pd.DataFrame([[0.0, 1.0, 0.2, pd.NA, 0.5]])
def test_fill(self):
t = pipeline.Fill(value=1.0)
expected = pd.DataFrame([[0.0, 1.0, 0.2, 1.0, 0.5]])
result = t.fit_transform(self.df)
pd.testing.assert_frame_equal(result, expected)
class TestTimeOffsetTransformer(TestCase):
def test_timeoffset(self):
t = pipeline.TimeOffsetTransformer(
time_columns=['t'], timedelta=pd.Timedelta(1, 'h'))
df = pd.DataFrame({'t': [datetime.datetime(2020, 10, 1, 12, 3, 10)]})
expected = pd.DataFrame(
{'t': [datetime.datetime(2020, 10, 1, 13, 3, 10)]})
result = t.fit_transform(df)
pd.testing.assert_frame_equal(result, expected)
def test_timeoffset_multi_col(self):
t = pipeline.TimeOffsetTransformer(
time_columns=['t'], timedelta=pd.Timedelta(1, 'h'))
df = pd.DataFrame({'t': [datetime.datetime(2020, 10, 1, 12, 3, 10)],
'tt': [datetime.datetime(2020, 10, 1, 13, 3, 10)]})
expected = pd.DataFrame(
{'t': [datetime.datetime(2020, 10, 1, 13, 3, 10)],
'tt': [datetime.datetime(2020, 10, 1, 13, 3, 10)]})
result = t.fit_transform(df)
pd.testing.assert_frame_equal(result, expected)
class TestConditionedDropper(TestCase):
def setUp(self):
self.data = [0.0, 0.5, 1.0, 1.2]
self.df = pd.DataFrame({'a': self.data})
def test_dropper(self):
t = pipeline.ConditionedDropper(column='a', threshold=1.0)
expected = pd.DataFrame({'a': self.data[:-1]})
result = t.fit_transform(self.df)
pd.testing.assert_frame_equal(result, expected)
def test_dropper_invert(self):
t = pipeline.ConditionedDropper(column='a', threshold=1.0, invert=True)
expected = pd.DataFrame({'a': self.data[-2:]})
result = t.fit_transform(self.df)
pd.testing.assert_frame_equal(result, expected)
class TestZeroVarianceDropper(TestCase):
def setUp(self):
self.df = pd.DataFrame({'one': np.ones((4,)),
'zeros': np.zeros((4,)),
'mixed': np.arange(4)})
def test_dropper(self):
t = pipeline.ZeroVarianceDropper(verbose=True)
expected = pd.DataFrame({
'mixed': np.arange(4)
})
result = t.fit_transform(self.df)
pd.testing.assert_frame_equal(result, expected)
def test_dropper_fit_higher_variance(self):
t = pipeline.ZeroVarianceDropper()
expected = pd.DataFrame({
'mixed': np.arange(4)
})
t.fit(self.df)
df = self.df.copy()
df.iloc[0, 0] = 0
with self.assertWarns(Warning):
result = t.transform(df)
pd.testing.assert_frame_equal(result, expected)
class TestSignalSorter(TestCase):
def setUp(self):
self.df = pd.DataFrame({'one': np.ones((4,)),
'zeros': np.zeros((4,)),
'mixed': np.arange(4),
'binary': [0, 1, 0, 1],
'cont': [10, 11, 10, 11]})
def test_sorter(self):
t = pipeline.SignalSorter(verbose=True)
expected = self.df.loc[:, ['mixed', 'cont', 'one', 'zeros', 'binary']]
result = t.fit_transform(self.df)
        pd.testing.assert_frame_equal(result, expected)
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
from django_plotly_dash import DjangoDash
import dash_bootstrap_components as dbc
import plotly.graph_objs as go
import plotly.express as px
import dash_table
import dash_daq as daq
import pandas as pd
import numpy as np
import random
import json
import datetime
from collections import Counter
from fall.models import AutumnChanllengeData
from eb_passwords import map_box_api_key
'''
Query outside django...
The time zone is just wrong!!
'''
'''
initialize these global variables on page load
'''
peoples = []
towns = []
upload_time = []
thumbnail = []
random_delay = []
CARD_POSITION = 0
# prevent setup complex map twice
def empty_map():
fig = go.Figure(go.Scattermapbox(lat=['38.91427',],lon=['-77.02827',]))
fig.update_layout(
mapbox=dict(
center=dict(lat=23.973793,lon=120.979703),
zoom=8,
style='white-bg')
)
return fig
def create_score_df():
df = pd.DataFrame.from_records(
AutumnChanllengeData.objects.filter(is_valid=True, survey_datetime__date__gte=datetime.date(2020,10,1)).values(
'creator','survey_datetime','latitude','longitude','county',))
if len(df) == 0:
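        # Column labels are Chinese UI strings: 挑戰者 = challenger, 總清單數 = total checklists,
        # 佔領鄉鎮數 = townships occupied, 首次佔領鄉鎮 = first-occupied townships,
        # 特殊得分 = bonus score, 總得分 = total score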
return pd.DataFrame(dict(挑戰者=[],總清單數=[],佔領鄉鎮數=[],首次佔領鄉鎮=[],特殊得分=[],總得分=[]))
creator_count = df.creator.value_counts()
number_of_checklist = creator_count.tolist()
unique_creator = creator_count.index.tolist()
    unique_county = pd.unique(df.county)
#! /usr/bin/env python3
import argparse
import re,sys,os,math,gc
import numpy as np
import pandas as pd
import matplotlib as mpl
import copy
import math
from math import pi
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from scipy import sparse
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import copy
import math
import seaborn as sns
from scipy.interpolate import BSpline, make_interp_spline  # required by draw_epigenetic2
plt.rcParams.update({'figure.max_open_warning': 100000})
plt.style.use('seaborn-colorblind')
mpl.rcParams['ytick.direction'] = 'out'
mpl.rcParams['savefig.dpi'] = 300  # saved-figure resolution (dpi)
mpl.rcParams['figure.dpi'] = 300
mpl.rcParams['pdf.fonttype']=42
mpl.rcParams['ps.fonttype']=42
__author__ ='赵玥'
__mail__ ='<EMAIL>'
__date__ ='20191101'
def draw_boundaries(ax,Boundary_dict,start,end,samplelist,str_x,sam_x):
ax.tick_params(top='off',bottom='off',left='on',right='off')
for loc in ['top','left','right','bottom']:
ax.spines[loc].set_visible(False)
#ax.spines['left'].set_color('k')
#ax.spines['left'].set_linewidth(2)
#ax.spines['left'].set_smart_bounds(True)
#ax.spines['left'].set_linewidth(1)
#ax.spines['right'].set_visible(False)
#ax.spines['bottom'].set_visible(False)
    ax.set_facecolor('w')  # set_axis_bgcolor() was removed in newer matplotlib
ax.set(xticks=[])
ax.set(yticks=[])
sample1 = samplelist[0]
sample2 = samplelist[1]
boundary_mid1 = Boundary_dict[sample1]['mid'].tolist()
boundary_mid2 = Boundary_dict[sample2]['mid'].tolist()
bound_y1min = [1.25 for i in boundary_mid1]
bound_y1max = [1.75 for i in boundary_mid1]
bound_y2min = [0.25 for i in boundary_mid2]
bound_y2max = [0.75 for i in boundary_mid2]
ax.set_ylim(0,2)
ax.vlines(boundary_mid1,bound_y1min,bound_y1max,lw=2,color='red')
ax.vlines(boundary_mid2,bound_y2min,bound_y2max,lw=2,color='green')
ax.set_xlim(start,end)
ax.text(str_x,0.5,'bound',horizontalalignment='right',verticalalignment='center',rotation='vertical',transform=ax.transAxes,fontsize=8)
ax.text(sam_x,0.75,sample1,horizontalalignment='right',verticalalignment='center',rotation='horizontal',transform=ax.transAxes,color="red",fontsize=8)
ax.text(sam_x,0.25,sample2,horizontalalignment='right',verticalalignment='center',rotation='horizontal',transform=ax.transAxes,color="green",fontsize=8)
def cut_boundaries(Boundary_dict,sample,boundaryPath,chrom,start,end):
Boundary_df = pd.read_table(boundaryPath,header=0,index_col=None,encoding='utf-8')
Boundary_df = Boundary_df.fillna(0)
Boundary_df = Boundary_df[['start','end']]
Boundary_df['mid'] = (Boundary_df['start'] + Boundary_df['end'])/2
Boundary_df = Boundary_df[Boundary_df['mid']>=start]
Boundary_df = Boundary_df[Boundary_df['mid']<=end]
Boundary_df.reset_index(drop=True)
Boundary_dict[sample] = Boundary_df
return Boundary_dict
def draw_insulation(ax,insu,chrs,start,end,color):
#df_insu=cut_insulation(insu,chrs,start,end)
df_insu=pd.read_table(insu,sep='\t',names=['chrs','start','end','insu'])
ax.tick_params(top='off',bottom='off',left='on',right='off')
line=ax.plot(df_insu['start'],df_insu['insu'], color=color, linewidth=0.8, label="insulation")
ax.set_xlim(start,end)
ax.set_xticks([])
ax.set_ylim(df_insu['insu'].min(),df_insu['insu'].max())
#ax.set_yticks([df_insu['insu'].min(),df_insu['insu'].max()])
for loc in ['left','top','bottom']:
ax.spines[loc].set_linewidth(0)
ax.spines[loc].set_color('black')
ax.spines['right'].set_linewidth(0)
ax.spines[loc].set_color('black')
def draw_SV(files,ax,chrom,start,end,sample,color,types):
markdf=pd.read_table(files,sep='\t')
markdf=markdf[markdf['types']==types]
markdf=markdf[markdf['chrs']==chrom]
markdf=markdf[markdf['start']>start]
markdf=markdf[markdf['end']<end]
ax.tick_params(left='on',right='off',top='off',bottom='on')
markdf['width'] = markdf['end'] - markdf['start']
markdf['sign']=[1]*len(markdf)
#vectorf = np.vectorize(np.float)
#vectori = np.vectorize(np.int)
#starts=list(markdf['start'])
#hight=list(markdf['sign'])
#width=(markdf['width'])
ax.bar(x=list(markdf['start']),height=list(markdf['sign']),bottom=0, width = list(markdf['width']),color=color,linewidth=0,align='edge')
ax.set_xlim([start,end])
ax.set_ylim([0,1])
xts = np.linspace(start,end,2)
yts = np.linspace(0,1,2)
xtkls = ['{:,}'.format(int(i)) for i in xts]
ytkls = ['{:,}'.format(int(j)) for j in yts]
ax.tick_params(direction='out',pad=1)
ax.set_yticks([])
#ax.set_yticklabels(ytkls,fontsize=5)
ax.text(-0.11,0.0,sample,fontsize=12,color='k',horizontalalignment='left',verticalalignment='bottom',rotation='vertical',transform=ax.transAxes)
#ax.set_title("{}_{}_{}_{}".format(sample,chrom,start,end),fontsize=10)
ax.spines['bottom'].set_linewidth(0)
ax.spines['left'].set_linewidth(0)
ax.spines['right'].set_linewidth(0)
ax.spines['top'].set_linewidth(0)
    if type =='bottom':  # NOTE: 'type' is the builtin here (draw_SV has no 'type' argument), so this branch never runs
ax.set_xticks(xts)
ax.set_xticklabels(xtkls,fontsize=12)
ax.spines['bottom'].set_linewidth(0.5)
ax.spines['bottom'].set_color('k')
ax.text(-0.11,-0.7,chrom,fontsize=12,color='k',horizontalalignment='left',verticalalignment='bottom',rotation='horizontal',transform=ax.transAxes)
else:
ax.set_xticks([])
ax.set_xticklabels('')
markdf = pd.DataFrame()
gc.collect()
def cut_insulation(insu,chrs,start,end):
file=open(insu)
file_list=[]
for i in file:
i=i.strip()
file_list.append(i)
insu_list=[]
for i in range(len(file_list)):
x=file_list[i].split('/')
insu_list.append([x[-2],file_list[i]])
list_df=pd.DataFrame(insu_list,columns=['chrs','insu'])
list_df=list_df[list_df['chrs']==chrs]
list_df=list_df.reset_index(drop=True)
df_insu=pd.read_table(list_df['insu'][0],sep='\t',names=['chrs','start','end','insu'],comment='t')
df_insu['mid']=(df_insu['start']+df_insu['end'])/2
df_insu=df_insu.fillna(0)
df_insu=df_insu[(df_insu['start']>start)&(df_insu['end']<end)]
return df_insu
def draw_AB(files,res,chrom,start,end,sample,ax):
compartdf = pd.read_table(files,sep='\t',names=['chrom','start','end','eigen1'])
compartdf = compartdf[compartdf['chrom']==chrom]
compartdf = compartdf.reset_index(drop=True)
df = compartdf
df=df[df['end']>=start]
df=df[df['start']<=end]
df=df.reset_index(drop=True)
ax.tick_params(top='off',bottom='on',left='off',right='off')
for loc in ['left','right','top','bottom']:
ax.spines[loc].set_visible(False)
df['width']=df['end']-df['start']
#ax.axis([start, end, min,max])
for i in range(len(df)):
if df['eigen1'][i]>0:
ax.bar(x=df['start'][i],height=df['eigen1'][i],bottom=0, width = df['width'][i],color='#E7605B',linewidth=0,align='edge')
else:
ax.bar(x=df['start'][i],height=df['eigen1'][i],bottom=0, width = df['width'][i],color='#3B679E',linewidth=0,align='edge')
ax.set_ylim(-0.1,0.1)
ax.set_ylabel(sample)
ax.set_yticks([])
ax.set_xticks([])
def Express_Swith(Epipath,chrom,start,end):
Expressdf = pd.read_table(Epipath,header=None,index_col=False,sep='\t')
Expressdf.columns = ['chrom','start','end','sign']
Expressdf = Expressdf[Expressdf['chrom']==chrom]
Expressdf = Expressdf[Expressdf['start']>=int(start)]
Expressdf = Expressdf[Expressdf['end']<=int(end)]
Expressdf = Expressdf.reset_index(drop=True)
return Expressdf
def draw_epigenetic(file,ax,chrom,start,end,sample,color,MaxYlim,type,mins):
markdf=pd.read_table(file,sep='\t',names=['chrs','start','end','sign'])
markdf=markdf[markdf['chrs']==chrom]
markdf=markdf[markdf['start']>start]
markdf=markdf[markdf['end']<end]
ax.tick_params(left='on',right='off',top='off',bottom='on')
markdf['width'] = markdf['end'] - markdf['start']
recs = ax.bar(x=list(markdf['start']),height=list(markdf['sign']),bottom=0, width = list(markdf['width']),color=color,linewidth=0,align='edge')
if MaxYlim == 'None':
ymaxlim = markdf['sign'].max()
yminlim = markdf['sign'].min()
else:
ymaxlim = float(MaxYlim)
yminlim = float(mins)
ax.set_xlim([start,end])
ax.set_ylim([yminlim,ymaxlim])
xts = np.linspace(start,end,5)
yts = np.linspace(yminlim,ymaxlim,2)
xtkls = ['{:,}'.format(int(i)) for i in xts]
ytkls = ['{:,}'.format(float(j)) for j in yts]
ax.tick_params(direction='out',pad=1)
ax.set_yticks(yts)
ax.set_yticklabels(ytkls,fontsize=5)
ax.text(-0.11,0.4,sample,fontsize=6,color='k',horizontalalignment='left',verticalalignment='bottom',rotation='horizontal',transform=ax.transAxes)
ax.spines['bottom'].set_linewidth(1)
ax.spines['left'].set_linewidth(1)
ax.spines['right'].set_linewidth(0)
ax.spines['top'].set_linewidth(0)
#ax.set_title("{}_{}_{}_{}".format(sample,chrom,start,end),fontsize=10)
if type =='bottom':
ax.set_xticks(xts)
ax.set_xticklabels(xtkls,fontsize=8)
ax.spines['bottom'].set_linewidth(0.5)
ax.spines['bottom'].set_color('k')
ax.text(-0.11,-0.7,chrom,fontsize=8,color='k',horizontalalignment='left',verticalalignment='bottom',rotation='horizontal',transform=ax.transAxes)
else:
ax.set_xticks([])
ax.set_xticklabels('')
markdf = pd.DataFrame()
gc.collect()
def draw_epigenetic2(file,ax,chrom,start,end,sample,color,MaxYlim,type,mins):
markdf=pd.read_table(file,sep='\t',names=['chrs','start','end','sign'])
#print (markdf.head())
markdf=markdf[markdf['chrs']==chrom]
markdf=markdf[markdf['start']>start]
markdf=markdf[markdf['end']<end]
ax.tick_params(left='on',right='off',top='off',bottom='on')
markdf['width'] = markdf['end'] - markdf['start']
markdf['width'] = markdf['end'] - markdf['start']
x = np.linspace(start,end,int(len(markdf)/8))
a_BSpline=make_interp_spline(markdf['start'],markdf['sign'],k=3)
y_new=a_BSpline(x)
ax.plot(x, y_new, color=color,linewidth=2)
ax.fill_between(x,y_new ,0,facecolor=color,linewidth=0,label=sample)
if MaxYlim == 'None':
ymaxlim = markdf['sign'].max()
yminlim = markdf['sign'].min()
else:
ymaxlim = float(MaxYlim)
yminlim = float(mins)
ax.set_xlim([start,end])
ax.set_ylim([yminlim,ymaxlim])
xts = np.linspace(start,end,4)
yts = np.linspace(yminlim,ymaxlim,2)
xtkls = ['{:,}'.format(int(i)) for i in xts]
ytkls = ['{:,}'.format(int(j)) for j in yts]
ax.spines['bottom'].set_linewidth(1)
ax.spines['left'].set_linewidth(1)
ax.spines['right'].set_linewidth(0)
ax.spines['top'].set_linewidth(0)
ax.tick_params(top=False,right=False,width=1,colors='black',direction='out')
ax.set_yticks(yts)
ax.set_yticklabels(ytkls,fontsize=12)
ax.text(-0.11,0.0,sample,fontsize=12,color='k',horizontalalignment='left',verticalalignment='bottom',rotation='vertical',transform=ax.transAxes)
#ax.set_title("{}_{}_{}_{}".format(sample,chrom,start,end),fontsize=10)
if type =='bottom':
ax.set_xticks(xts)
ax.set_xticklabels(xtkls,fontsize=12)
ax.spines['bottom'].set_linewidth(0.5)
ax.spines['bottom'].set_color('k')
ax.text(-0.11,-0.7,chrom,fontsize=8,color='k',horizontalalignment='left',verticalalignment='bottom',rotation='horizontal',transform=ax.transAxes)
else:
ax.set_xticks([])
ax.set_xticklabels('')
markdf = pd.DataFrame()
gc.collect()
def draw_RNA(file,ax,chrom,start,end,sample,color,MaxYlim,type,mins):
markdf=pd.read_table(file,sep='\t',names=['chrs','start','end','sign'])
#print (markdf.head())
markdf=markdf[markdf['chrs']==chrom]
markdf=markdf[markdf['start']>start]
markdf=markdf[markdf['end']<end]
ax.tick_params(left='on',right='off',top='off',bottom='on')
markdf['width'] = markdf['end'] - markdf['start']
    vectorf = np.vectorize(float)  # np.float / np.int aliases were removed from NumPy
    vectori = np.vectorize(int)
starts=vectori(markdf['start'])
hight=vectorf(markdf['sign'])
width=vectori(markdf['width'])
ax.bar(x=starts,height=hight,bottom=0,width=width,color=color,linewidth=0,align='edge')
if MaxYlim == 'None':
ymaxlim = markdf['sign'].max()
yminlim = markdf['sign'].min()
else:
ymaxlim = float(MaxYlim)
yminlim = float(mins)
ax.set_xlim([start,end])
ax.set_ylim([yminlim,ymaxlim])
xts = np.linspace(start,end,5)
yts = np.linspace(yminlim,ymaxlim,2)
xtkls = ['{:,}'.format(int(i)) for i in xts]
ytkls = ['{:,}'.format(int(j)) for j in yts]
ax.tick_params(direction='out',pad=1)
ax.spines['bottom'].set_linewidth(1)
ax.spines['left'].set_linewidth(1)
ax.spines['right'].set_linewidth(0)
ax.spines['top'].set_linewidth(0)
ax.set_yticks(yts)
ax.set_yticklabels(ytkls,fontsize=12)
ax.text(-0.11,0.4,sample,fontsize=12,color='k',horizontalalignment='left',verticalalignment='bottom',rotation='vertical',transform=ax.transAxes)
#ax.set_title("{}_{}_{}_{}".format(sample,chrom,start,end),fontsize=10)
if type =='bottom':
ax.set_xticks(xts)
ax.set_xticklabels(xtkls,fontsize=12)
ax.spines['bottom'].set_linewidth(0.5)
ax.spines['bottom'].set_color('k')
ax.text(-0.11,-0.7,chrom,fontsize=12,color='k',horizontalalignment='left',verticalalignment='bottom',rotation='horizontal',transform=ax.transAxes)
else:
ax.set_xticks([])
ax.set_xticklabels('')
markdf = pd.DataFrame()
gc.collect()
def Express_Swith(Epipath,chrs,start,end):
Expressdf = pd.read_table(Epipath,header=None,index_col=False,sep='\t')
Expressdf.columns = ['chrs','start','end','sign']
Expressdf = Expressdf[Expressdf['chrs']==chrs]
Expressdf = Expressdf[Expressdf['start']>=int(start)]
Expressdf = Expressdf[Expressdf['end']<=int(end)]
Expressdf = Expressdf.reset_index(drop=True)
return Expressdf
def draw_diff_epigenetic(file1,file2,ax,chrs,start,end,color,MaxYlim,MinYlim,type):
df1=Express_Swith(file1,chrs,start,end)
df2=Express_Swith(file2,chrs,start,end)
markdf = pd.merge(df1,df2,on='start',how='inner')
markdf['sign'] = np.log2(markdf['sign_x']) - np.log2(markdf['sign_y'])
markdf = markdf[['chrs_x','start','end_x','sign']]
markdf.columns = ['chrs','start','end','sign']
markdf = markdf.reset_index(drop=True)
ax.tick_params(left='on',right='off',top='off',bottom='on')
markdf['width'] = markdf['end'] - markdf['start']
recs = ax.bar(markdf['start'],markdf['sign'],bottom=0, width = markdf['width'],color=color,linewidth=0)
if MaxYlim == 'None':
ymaxlim = markdf['sign'].max()
yminlim = markdf['sign'].min()
else:
ymaxlim = float(MaxYlim)
yminlim = float(MinYlim)
ax.set_xlim([start,end])
ax.set_ylim([yminlim,ymaxlim])
xts = np.linspace(start,end,5)
yts = np.linspace(yminlim,ymaxlim,2)
xtkls = ['{:,}'.format(int(i)) for i in xts]
ytkls = ['{:,}'.format(int(j)) for j in yts]
ax.tick_params(direction='out',pad=1)
ax.set_yticks(yts)
ax.set_yticklabels(ytkls,fontsize=5)
#ax.text(-0.11,0.4,sample,fontsize=6,color='k',horizontalalignment='left',verticalalignment='bottom',rotation='horizontal',transform=ax.transAxes)
#ax.set_title("{}_{}_{}_{}".format(sample,chrom,start,end),fontsize=10)
if type =='bottom':
ax.set_xticks(xts)
ax.set_xticklabels(xtkls,fontsize=8)
ax.spines['bottom'].set_linewidth(0.5)
ax.spines['bottom'].set_color('k')
ax.text(-0.11,-0.7,chrs,fontsize=8,color='k',horizontalalignment='left',verticalalignment='bottom',rotation='horizontal',transform=ax.transAxes)
else:
ax.set_xticks([])
ax.set_xticklabels('')
markdf = pd.DataFrame()
ax.spines['bottom'].set_linewidth(0)
ax.spines['left'].set_linewidth(1)
ax.spines['top'].set_linewidth(0)
ax.spines['right'].set_linewidth(0)
gc.collect()
def draw_bar(ax,file,chrom,start,end,max,min):
df=pd.read_table(file,sep='\t',names=['chrs','start','end','sign'])
df=df[df['chrs']==chrom]
df=df[df['start']>start]
df=df[df['end']<end]
df=df.reset_index(drop=True)
ax.tick_params(top='off',bottom='on',left='off',right='off')
for loc in ['left','right','top']:
ax.spines[loc].set_visible(False)
df['width']=df['end']-df['start']
#ax.axis([start, end, min,max])
for i in range(len(df)):
if df['sign'][i]>0:
ax.bar(df['start'][i],df['sign'][i],bottom=0, width = df['width'][i],color='#E7605B',linewidth=0)
else:
ax.bar(df['start'][i],df['sign'][i],bottom=0, width = df['width'][i],color='#3B679E',linewidth=0)
ax.set_ylim(min,max)
ax.set_yticks([])
ax.set_xticks([])
def get_4C_data(matrix,tstart,tend,binsize,start,end):
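    # Virtual 4C: pick the matrix column of the viewpoint bin (tstart relative to the plotted window),
    # i.e. that bin's contact frequency with every other bin in the region.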
print (binsize)
t=int((tstart-start)/int(binsize))
print ('t',t,'matrix',len(matrix))
datalist=matrix.loc[:,[t]]
return datalist
from statsmodels.nonparametric.smoothers_lowess import lowess
def draw_4C_module(ax,df_list,chrs,start,end,color_list,ymin,ymax,sample_list):
ax.tick_params(top='off',bottom='off',left='on',right='off')
i=0
for df in df_list:
x = np.linspace(start,end,len(df))
df['width']=df['end']-df['start']
df_loess = pd.DataFrame(lowess(df['sign'], np.arange(len(df['sign'])), frac=0.05)[:, 1], index=df.index, columns=['sign'])
ax.plot(x,df_loess['sign'], color=color_list[i], linewidth=2,label=sample_list[i],alpha=0.3)
i+=1
#ax.legend(handles2, labels2)
ax.set_xlim(start,end)
ax.set_ylim(ymin,ymax)
ax.set_yticks([ymin,ymax])
ax.legend(loc='right',bbox_to_anchor=(1.05,0.3),handlelength=1,handleheight=0.618,fontsize=6,frameon=False)
for loc in ['left']:
ax.spines[loc].set_linewidth(0.6)
ax.spines[loc].set_color('gray')
#ax.tick_params(top=False,right=False,width=1,colors='black',direction='out')
ax.spines['bottom'].set_linewidth(1)
ax.spines['left'].set_linewidth(1)
ax.spines['right'].set_linewidth(0)
ax.spines['top'].set_linewidth(0)
ax.tick_params(top=False,right=False,bottom=False,width=1,colors='black',direction='out')
def draw_4C(ax,chrs,start,end,matrix_list,samples,binsize,tstart,tend,colors,ymax):
sample_list=samples.split(',')
bed_list=[]
for sample in sample_list:
matrix,min=extract_raw_matrix(matrix_list,sample,chrs,start,end,binsize)
datalist=get_4C_data(matrix,int(tstart),int(tend),binsize,int(start),int(end))
bed_list.append(datalist)
starts=[]
for i in range(start,end,int(binsize)):
starts.append(i)
df_list=[]
for i in bed_list:
df=pd.DataFrame({'start':starts})
df['chrs']=[chrs]*len(df)
df['end']=df['start']+int(binsize)
df['sign']=i
df_list.append(df)
color_list=colors.split(',')
draw_4C_module(ax,df_list,chrs,start,end,color_list,0,int(ymax),sample_list)
def draw_compartment(ax,sample,compmergedf,chrom,start,end,type='top'):
ax.tick_params(top='off',bottom='on',left='off',right='off')
for loc in ['left','right','top']:
ax.spines[loc].set_visible(False)
mat = compmergedf[sample]
#print(mat)
s = compmergedf['start']
colors = ['red','blue','#458B00','#B9BBF9','black']
ax.set_xlim(start,end)
if sample == 'Merge':
ax.fill_between(s, 0, 0.25,where=mat==1, facecolor=colors[0],linewidth=0,label='CompartmentA')
ax.fill_between(s, 0.25, 0.5,where=mat==2, facecolor=colors[2],linewidth=0,label='A Switch B')
ax.fill_between(s, 0, -0.25,where=mat==-1,facecolor=colors[1],linewidth=0,label='CompartmentB')
ax.fill_between(s, -0.25,-0.5,where=mat==-2,facecolor=colors[3],linewidth=0,label='B Switch A')
legend = ax.legend(bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0.,prop={'size':4},ncol=1)
legend.get_frame().set_facecolor('white')
else:
ax.fill_between(s, 0, mat,where=mat>= 0, facecolor=colors[0],linewidth=0,label='CompartmentA')
ax.fill_between(s, 0, mat,where=mat< 0, facecolor=colors[1],linewidth=0,label='CompartmentB')
#ax.text(max(mat)/4,-5,'A');ax.text(max(mat)/2,-5,'B')
ax.text(-0.11,0.4,sample,fontsize=6,color='k',horizontalalignment='left',verticalalignment='bottom',rotation='horizontal',transform=ax.transAxes)
ymax = mat.max()+0.005
ymin = mat.min()-0.005
xts = np.linspace(start,end,5)
xtkls = ['{:,}'.format(int(i)) for i in xts]
ax.set_ylim(ymin,ymax)
ax.set_yticks([])
#ax.set_ylabel(sample,rotation='vertical',fontsize='small')
#compmergedf = pd.DataFrame()
if type =='bottom':
ax.set_xticks(xts)
ax.set_xticklabels(xtkls,fontsize=8)
ax.spines['bottom'].set_linewidth(1)
ax.spines['bottom'].set_color('k')
ax.text(-0.11,-0.7,chrom,fontsize=8,color='k',horizontalalignment='left',verticalalignment='bottom',rotation='horizontal',transform=ax.transAxes)
else:
ax.set_xticks([])
ax.spines['bottom'].set_visible(False)
gc.collect()
def colorbar(ax,im,vmin,vmax):
axins1 = inset_axes(ax, width=0.1,height=0.6,loc=3, bbox_to_anchor=(0, 0.2, 0.5, 1), bbox_transform=ax.transAxes,borderpad=0)
print (vmin,vmax)
cbar=plt.colorbar(im, cax=axins1, orientation='vertical',ticks=[math.ceil(vmin),int(vmax)])
axins1.tick_params(left='on',right='off',top='off',bottom='off',labelsize=12)
axins1.yaxis.set_ticks_position('left')
return cbar
import math
from matplotlib import pyplot as plt
plt.style.use('seaborn-colorblind')
pd.set_option('display.precision',2)
from scipy import sparse
import matplotlib.gridspec as gridspec
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
#from annotation_GenePred import Gene_annotation
def cut_mat(mat,start,end,resolution,min):
start = int(int(start)/resolution)
end = math.ceil(int(end)/resolution)
start = int(start - min)
end = int(end - min)
mat = mat.fillna(0)
mat = mat.iloc[start:end+1,start:end+1]
gc.collect()
return mat,start,end
def self_zscore(df):
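    # Z-score the matrix against the global mean and std of all of its entries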
dsc = pd.DataFrame(np.ravel(df)).describe(include=[np.number])
df = (df - dsc.loc['mean',0])/dsc.loc['std',0]
return df
from scipy.ndimage import gaussian_filter
def get_matrix(mat_path,binsize,start,end):
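    # Build a dense, symmetric contact matrix from a sparse (bin1, bin2, contacts) triplet table,
    # keeping a 3 Mb pad on each side of the requested window.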
binsize=int(binsize)
mat=pd.read_table(mat_path,names=['b1','b2','contacts'])
mat=mat[(mat['b1']>=start-3000000) & (mat['b2']>=start-3000000)]
mat=mat[(mat['b1']<=end+3000000) & (mat['b2']<=end+3000000)]
#-----------xlim genome start genome end-------------------------------
min=mat['b1'].min()
max=mat['b1'].max()
min=math.ceil(int(min)/binsize)*binsize
max=int(int(max)/binsize)*binsize
N=int(max/binsize)-math.ceil(min/binsize)+1
mat['b1']=mat['b1'].apply(lambda x: (x-min-1)/binsize)
mat['b2']=mat['b2'].apply(lambda x: (x-min-1)/binsize)
#-----------coo matrix-----------------------------------------------
counts=sparse.coo_matrix((mat['contacts'],(mat['b1'],mat['b2'])),shape=(N, N),dtype=float).toarray()
diag_matrix=np.diag(np.diag(counts))
counts=counts.T + counts
#counts=counts-diag_matrix
counts=counts-diag_matrix-diag_matrix
    df = pd.DataFrame(counts)
import os
import pandas as pd
import numpy as np
import cv2
from ._io_data_generation import check_directory, find_movies, copy_movie
from .LV_mask_analysis import Contour
import matplotlib.pyplot as plt
import networkx as nx
from sklearn.neighbors import NearestNeighbors
from scipy.spatial.distance import cdist
from itertools import combinations
class ExtractEdEs:
def __init__(self, echonet_path=None, output_path=None):
if echonet_path is not None:
self.echonet_path = echonet_path
self.movies_path = os.path.join(echonet_path, 'GoodX2Y2')
self.output_path = check_directory(os.path.join(echonet_path, 'Output'))
if output_path is not None:
self.output_path = check_directory(output_path)
self.df_volume_tracings = None
self.list_of_movie_files = None
self.movie_name = None
def _get_volume_tracings(self):
self.df_volume_tracings = pd.read_excel(
os.path.join(self.echonet_path, 'VolumeTracings.xlsx'),
index_col='FileName',
sheet_name='VolumeTracings')
# TESTING
# self.df_volume_tracings = pd.read_excel(
# os.path.join(r'G:\DataGeneration\echonet_labels', 'VolumeTracingsTest.xlsx'),
# index_col='FileName',
# sheet_name='Sheet1')
@staticmethod
def _get_contour_area(contour):
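        # Polygon area via the shoelace formula; `contour` is an (N, 2) array of x/y points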
x, y = contour[:, 0], contour[:, 1]
return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
@staticmethod
def _tri_len(triplet):
triplet_shift = triplet.copy()
triplet_shift = np.roll(triplet_shift, 1)
perimeter = np.sum([np.linalg.norm(a - b) for a, b in zip(triplet, triplet_shift)])
return perimeter
def _fix_contour(self, df_split_contour, plot_contour=False):
def _remove_basal_points(_df, label='X1'):
new_df = _df.copy()
points = new_df[label].values
dists = np.abs(np.diff(points))
if dists[-1] > 3 * np.mean(dists):
new_df = new_df.iloc[:-1]
if dists[-2] > 3 * np.mean(dists):
new_df = new_df.iloc[:-1]
return new_df
df_1 = df_split_contour[['X1', 'Y1']].copy()
df_1 = _remove_basal_points(df_1, 'X1')
apex = df_1.iloc[0]
df_2 = df_split_contour[['X2', 'Y2']].copy().iloc[1:]
df_2 = _remove_basal_points(df_2, 'X2')
df_2 = df_2.iloc[::-1]
x = np.concatenate((df_2['X2'], df_1['X1']))
y = np.concatenate((df_2['Y2'], df_1['Y1']))
contour = np.array((x, y)).T
# plt.plot(contour[:, 0], contour[:, 1], '.-')
# plt.show()
fixed_contour = self.sort_points_echonet_contour(contour, apex, False)
if plot_contour:
plt.plot(contour[:, 0], contour[:, 1], ':', label='contour')
plt.plot(fixed_contour[:, 0], fixed_contour[:, 1], '-or', label='contour')
plt.scatter(x=apex[0], y=apex[1],
c='b', marker='d', s=80, label='apex')
plt.scatter(fixed_contour[0, 0], fixed_contour[0, 1], c='g', marker='d', s=80, label='left_basal')
plt.scatter(fixed_contour[-1, 0], fixed_contour[-1, 1], c='k', marker='d', s=80, label='right_basal')
plt.legend()
plt.show()
return fixed_contour, np.where(apex)[0][0]
def sort_points_echonet_contour(self, points, _apex, show):
perimeters, areas = [], []
for i in range(1, 5):
tri = np.array([points[0], _apex, points[-i]])
perimeters.append(self._tri_len(tri))
areas.append(self._get_contour_area(tri))
score = np.array(perimeters) * np.array(areas)
if np.argmax(score) == 0:
new_points = points
else:
new_points = points[:-(np.argmax(score)), :]
new_points = np.flipud(new_points)
if show:
xx = new_points[:, 0]
yy = new_points[:, 1]
plt.figure()
plt.plot(xx, yy, 'd-')
plt.scatter(new_points[-1, 0], new_points[-1, 1], c='r', s=70)
plt.scatter(new_points[0, 0], new_points[0, 1], c='g', s=70)
plt.scatter(_apex[0], _apex[1], c='k', s=70)
plt.show()
return new_points
def sort_points_full_contour(self, points, show):
def _sort_w_neighbours(_points, point_id=10):
print('NearestNeighbors')
            clf = NearestNeighbors(n_neighbors=2, n_jobs=-1).fit(_points)
G = clf.kneighbors_graph()
point_set = nx.from_scipy_sparse_matrix(G)
opt_order = list(nx.dfs_preorder_nodes(point_set, point_id))
_sorted_points = np.array([_points[new_id] for new_id in opt_order])
return _sorted_points
def _update_marker_ids(_points, _markers):
_markers['id_left_basal'] = int(np.where(_markers['left_basal'] == _points)[0][0])
_markers['id_right_basal'] = int(np.where(_markers['right_basal'] == _points)[0][0])
_markers['id_apex'] = int(np.where(_markers['apex'] == _points)[0][0])
return _markers
def _get_corner_points(_points):
distances = cdist(points, points)
corner_points = np.argmax(distances, axis=0)
unique, counts = np.unique(corner_points, return_counts=True)
pareto_points = points[unique]
print(pareto_points)
combs = list(combinations(pareto_points, r=3))
perimeters, areas, tris = [], [], []
for tri in combs:
tris.append(np.array(tri))
perimeters.append(self._tri_len(np.array(tri)))
areas.append(self._get_contour_area(np.array(tri)))
score = np.array(perimeters) * np.array(areas)
optimal_triangle = np.array(combs[int(np.argmax(score))])
_markers = dict()
basal_points = sorted(optimal_triangle, key=lambda x: (x[1]), reverse=True)[:2]
_markers['left_basal'], _markers['right_basal'] = sorted(basal_points, key=lambda x: (x[0]))
_markers['apex'] = sorted(optimal_triangle, key=lambda x: (x[1]), reverse=False)[0]
_markers = _update_marker_ids(_points, _markers)
return _markers
points = _sort_w_neighbours(points)
markers = _get_corner_points(points)
points = _sort_w_neighbours(points, markers['id_left_basal'])
markers = _update_marker_ids(points, markers)
if markers['id_apex'] > markers['id_right_basal']:
print('basal_direction')
sorted_points = np.concatenate((points[0].reshape(1, -1), points[-1:markers['id_right_basal']-1:-1]))
sorted_points = _sort_w_neighbours(sorted_points, markers['id_left_basal'])
markers = _update_marker_ids(points, markers)
else:
print('apical direction')
sorted_points = points[:markers['id_right_basal']+1]
if show:
xx = sorted_points[:, 0]
yy = sorted_points[:, 1]
plt.figure()
plt.plot(xx, yy, 'd-')
plt.scatter(markers['left_basal'][0], markers['left_basal'][1], c='r', s=70)
plt.scatter(markers['right_basal'][0], markers['right_basal'][1], c='r', s=70)
plt.scatter(markers['apex'][0], markers['apex'][1], c='r', s=70)
plt.show()
return sorted_points, markers
def process_contours(self, movie_id, df_case_data, frame_numbers):
contours = {'id': movie_id}
phases = ['ed', 'es']
for i, frame_number in enumerate(frame_numbers):
df_contour = df_case_data.loc[df_case_data.Frame == frame_number]
contour, apex_id = self._fix_contour(df_contour.copy())
contour_area = self._get_contour_area(contour)
contours[phases[i]] = {'contour': contour, 'contour_area': contour_area, 'frame': frame_number,
'apex_id': apex_id}
if contours['ed']['contour_area'] < contours['es']['contour_area']:
contours['ed'], contours['es'] = contours['es'], contours['ed']
return contours
def process_movie(self, ed_frame, es_frame):
dict_frames = {}
vidcap = cv2.VideoCapture(os.path.join(self.movies_path, self.movie_name))
success, _ = vidcap.read()
vidcap.set(1, es_frame - 1)
success, dict_frames['es'] = vidcap.read()
vidcap.set(1, ed_frame - 1)
success, dict_frames['ed'] = vidcap.read()
return dict_frames
def _save_contours(self, dict_contours):
contours_path = check_directory(os.path.join(self.output_path, 'Contours'))
np.savetxt(os.path.join(contours_path, '{}_ed.csv'.format(dict_contours['id'])),
dict_contours['ed']['contour'], fmt='%1.4f', delimiter=',')
np.savetxt(os.path.join(contours_path, '{}_es.csv'.format(dict_contours['id'])),
dict_contours['es']['contour'], fmt='%1.4f', delimiter=',')
def _save_screenshots(self, dict_contours):
screenshot_path = check_directory(os.path.join(self.output_path, 'Phase_images'))
default_im_size = 1024
frame_images = self.process_movie(dict_contours['ed']['frame'], dict_contours['es']['frame'])
for phase in ['ed', 'es']:
orig_ed_height, orig_ed_width = frame_images[phase].shape[:2]
drawing_contours = np.array([dict_contours[phase]['contour'][:, 0] * default_im_size / orig_ed_height,
dict_contours[phase]['contour'][:, 1] * default_im_size / orig_ed_width]).T
drawing_image = cv2.resize(frame_images[phase], (default_im_size, default_im_size))
cv2.polylines(drawing_image, [np.int32(drawing_contours)], isClosed=False, color=(255, 0, 0), thickness=5)
cv2.imwrite(os.path.join(screenshot_path, "{}_{}.jpg".format(dict_contours['id'], phase)), drawing_image)
def _save_curvature_markers(self, dict_contours):
curvature_indices_path = check_directory(os.path.join(self.output_path, 'Curvature_indices'))
curvature_markers = []
for phase in ('ed', 'es'):
curvature_markers.append(dict_contours[phase]['curvature_markers'])
df_curvature = pd.DataFrame(curvature_markers, index=['ed', 'es'])
df_curvature.to_csv(os.path.join(curvature_indices_path, dict_contours['id'] + '_curv.csv'))
def extract_case_data(self, save_contours=False, save_curvature_indices=True, save_screenshots=False):
curvature_indices = None
movie_id = os.path.splitext(os.path.basename(self.movie_name))[0]
print('Case ID: {}'.format(movie_id))
df_case = self.df_volume_tracings.loc[movie_id]
frames = pd.unique(df_case['Frame'])
        assert len(frames) == 2, 'Expected exactly 2 contour frames for case {}'.format(movie_id)
contours = self.process_contours(movie_id, df_case, frames)
cont = Contour(segmentations_path=None)
for phase in ('ed', 'es'):
cont.endo_sorted_edge, _ = cont._fit_border_through_pixels(contours[phase]['contour'])
cont.curvature = cont._calculate_curvature()
contours[phase]['curvature'] = cont.curvature
contours[phase]['curvature_markers'] = cont._get_curvature_markers()
if save_curvature_indices:
print('Saving curvature indices, ID: {}'.format(contours['id']))
self._save_curvature_markers(contours)
if save_contours:
print('Saving contours, ID: {}'.format(contours['id']))
self._save_contours(contours)
if save_screenshots:
print('Saving phase images, ID: {}'.format(contours['id']))
self._save_screenshots(contours)
return curvature_indices
def sort_movies(self):
# good_x2y2_path = check_directory(os.path.join(self.echonet_path, 'GoodX2Y2'))
# bad_x2y2_path = check_directory(os.path.join(self.echonet_path, 'BadX2Y2'))
movie_id = os.path.splitext(os.path.basename(self.movie_name))[0]
print('Case ID: {}'.format(movie_id))
df_case = self.df_volume_tracings.loc[movie_id]
        frames = pd.unique(df_case['Frame'])
from datetime import timezone
import pandas as pd
import numpy as np
import datetime
import netCDF4
import time
def _validate_date(date_text):
'''
Checks date format to ensure YYYY-MM-DD format and return date in
datetime format.
Parameters
----------
date_text: string
Date string format to check
Returns
-------
dt: datetime
'''
    assert isinstance(date_text, str), ('date_text must be '
                                        'of type string')
try:
dt = datetime.datetime.strptime(date_text, '%Y-%m-%d')
except ValueError:
raise ValueError("Incorrect data format, should be YYYY-MM-DD")
else:
dt = dt.replace(tzinfo=timezone.utc)
return dt
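# Example (illustrative):
# _validate_date('2012-04-01') -> datetime.datetime(2012, 4, 1, 0, 0, tzinfo=datetime.timezone.utc)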
def _start_and_end_of_year(year):
'''
Returns a datetime start and end for a given year
Parameters
----------
year: int
Year to get start and end dates
Returns
-------
start_year: datetime object
start of the year
end_year: datetime object
end of the year
'''
    assert isinstance(year, (type(None),int,list)), 'year must be of type int, list, or None'
try:
year = str(year)
start_year = datetime.datetime.strptime(year, '%Y')
except ValueError:
raise ValueError("Incorrect years format, should be YYYY")
else:
next_year = datetime.datetime.strptime(f'{int(year)+1}', '%Y')
end_year = next_year - datetime.timedelta(days=1)
return start_year, end_year
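# Example (illustrative):
# _start_and_end_of_year(2020) -> (datetime.datetime(2020, 1, 1, 0, 0), datetime.datetime(2020, 12, 31, 0, 0))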
def _dates_to_timestamp(nc, start_date=None, end_date=None):
'''
Returns timestamps from dates.
Parameters
----------
nc: netCDF Object
netCDF data for the given station number and data type
start_date: string
Start date in YYYY-MM-DD, e.g. '2012-04-01'
end_date: string
End date in YYYY-MM-DD, e.g. '2012-04-30'
Returns
-------
start_stamp: float
seconds since the Epoch to start_date
end_stamp: float
seconds since the Epoch to end_date
'''
    assert isinstance(start_date, (str, type(None))), ('start_date '
                                                       'must be of type str')
    assert isinstance(end_date, (str, type(None))), ('end_date must be '
                                                     'of type str')
time_all = nc.variables['waveTime'][:].compressed()
time_range_all = [datetime.datetime.fromtimestamp(time_all[0]).replace(tzinfo=timezone.utc),
datetime.datetime.fromtimestamp(time_all[-1]).replace(tzinfo=timezone.utc)]
if start_date:
start_datetime = _validate_date(start_date)
if end_date:
end_datetime = _validate_date(end_date)
if start_datetime > end_datetime:
raise Exception(f'start_date ({start_datetime}) must be'+
f'before end_date ({end_datetime})')
elif start_datetime == end_datetime:
raise Exception(f'start_date ({start_datetime}) cannot be'+
f'the same as end_date ({end_datetime})')
if start_date:
if start_datetime > time_range_all[0] and start_datetime < time_range_all[1]:
start_stamp = start_datetime.replace(tzinfo=timezone.utc).timestamp()
else:
print(f'WARNING: Provided start_date ({start_datetime}) is '
f'not in the returned data range {time_range_all} \n'
f'Setting start_date to the earliest date in range '
f'{time_range_all[0]}')
start_stamp = pd.to_datetime(time_range_all[0]).replace(tzinfo=timezone.utc).timestamp()
if end_date:
if end_datetime > time_range_all[0] and end_datetime < time_range_all[1]:
end_stamp = end_datetime.replace(tzinfo=timezone.utc).timestamp()
else:
print(f'WARNING: Provided end_date ({end_datetime}) is '
f'not in the returned data range {time_range_all} \n'
f'Setting end_date to the latest date in range '
f'{time_range_all[1]}')
end_stamp = pd.to_datetime(time_range_all[1]).replace(tzinfo=timezone.utc).timestamp()
if start_date and not end_date:
        end_stamp = pd.to_datetime(time_range_all[1]).replace(tzinfo=timezone.utc).timestamp()
"""
Download data from original sources if they are not already present in the data dir
"""
import argparse
import os
from pathlib import Path
import pandas as pd
import requests
def delete_file(target_dir, filename):
test_path = Path(os.path.join(target_dir, filename))
if test_path.is_file():
os.remove(test_path)
# Download the Gender pay gap data from UK Gov if it's not already there
def download_file_if_not_exist(url, target_dir='data', extension='', filename=None):
local_filename = filename if filename is not None else url.split('/')[-1] + extension
Path(target_dir).mkdir(parents=True, exist_ok=True)
test_path = Path(os.path.join(target_dir, local_filename))
if test_path.is_file():
print("{} already exists in '{}' folder".format(local_filename, target_dir))
return
print("Downloading {} to {}".format(local_filename, target_dir))
with requests.get(url, stream=True) as r:
with test_path.open('wb') as f:
for chunk in r.iter_content(chunk_size=1024):
f.write(chunk)
return local_filename
def download_data():
for year in (2017, 2018, 2019):
download_file_if_not_exist(
url='https://gender-pay-gap.service.gov.uk/viewing/download-data/{}'.format(year),
target_dir='data',
filename="ukgov-gpg-{}.csv".format(year))
SIC_CODES_CSV='https://datahub.io/core/uk-sic-2007-condensed/r/uk-sic-2007-condensed.csv'
download_file_if_not_exist(
url=SIC_CODES_CSV,
target_dir='data',
filename='sic_codes.csv'
)
def merge_years(df2017, df2018, df2019):
df2017['year'] = 2017
df2018['year'] = 2018
df2019['year'] = 2019
return pd.concat([df2017, df2018, df2019])
def acquire_data(save_file=False, output_filename='data/ukgov-gpg-full.csv'):
download_data()
df_2017 = pd.read_csv('data/ukgov-gpg-2017.csv', dtype={'SicCodes': str})
df_2018 = pd.read_csv('data/ukgov-gpg-2018.csv', dtype={'SicCodes': str})
    df_2019 = pd.read_csv('data/ukgov-gpg-2019.csv', dtype={'SicCodes': str})
import logging
import pandas as pd
from catboost import CatBoostClassifier
logging.basicConfig(filename='logs/model_development.txt',
filemode='a',
format='%(asctime)s %(message)s',
datefmt="%Y-%m-%d %H:%M:%S")
logging.warning('-'*100)
logging.warning('FEATURE IMPORTANCE')
# dataset
data = pd.read_csv('reports/final_dataset.csv')
# Initialize the model
catboostclassifier = CatBoostClassifier(verbose=0)
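# NOTE: feature_importances_ is only populated after the classifier has been fit.
# A minimal sketch, assuming the label column is named 'target' (hypothetical name -- adjust to the real dataset):
# X, y = data.drop(columns=['target']), data['target']
# catboostclassifier.fit(X, y)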
logging.warning('Check which features have the highest importance values.')
for feature in zip(data.columns, catboostclassifier.feature_importances_):
print(feature)
logging.warning('Create a dataframe containing features and their importance.')
feature_dict = dict(zip(data.columns, list(catboostclassifier.feature_importances_)))
feature_df = pd.DataFrame(feature_dict, index=['Importance'])
import cobra
from cobra.core.metabolite import elements_and_molecular_weights
elements_and_molecular_weights['R']=0.0
elements_and_molecular_weights['Z']=0.0
import pandas as pd
import numpy as np
import csv
#### Change Biomass composition
# define a function change a biomass reaction in the model
def update_biomass(model, rxn, stoich, metabolite):
r = model.reactions.get_by_id(rxn)
new_stoich = stoich
# you now have a dictionary of new stoichs for your model
for m,s in r.metabolites.items():
stoich = s*-1
temp_dict = {m:stoich}
r.add_metabolites(temp_dict)
r.add_metabolites(new_stoich)
# Then get the total to equal 1 mg biomass DW
total = 0
for m,s in r.metabolites.items():
gfw = model.metabolites.get_by_id(m.id).formula_weight
mass = gfw*s*-1
total = total+mass
correction = total/1000 # this will get it to 1000 ug total mass
# Then adjust the stoichiometry as appropriate
for m,s in r.metabolites.items(): # now change the stoich
to_add = s/correction-s
r.add_metabolites({m:to_add})
# Finally build the biomass_c metabolite
imbal = r.check_mass_balance()
if 'charge' in imbal.keys():
met_charge = imbal['charge']*-1
del imbal['charge']
met_mass = 0
formula_string = ''
for e,v in imbal.items():
if v > 1e-10 or v < -1e-10:
mass = elements_and_molecular_weights[e]
met_mass = met_mass+(mass*-1*v)
form_str = e+str(-1*v)
formula_string = formula_string + form_str
met = model.metabolites.get_by_id(metabolite)
met.formula = formula_string
met.charge = met_charge
r.add_metabolites({met:1})
# Add GAM constraint
if rxn == 'bof_c':
gam_met = model.metabolites.GAM_const_c
r.add_metabolites({gam_met:1})
model.repair()
print(model.reactions.get_by_id(rxn).reaction)
print('')
print(model.metabolites.get_by_id(met.id).formula)
print('')
print(model.metabolites.get_by_id(met.id).formula_weight)
print('')
print(model.reactions.get_by_id(rxn).check_mass_balance())
return model
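# Example usage (illustrative only; the metabolite IDs and coefficients below are placeholders,
# not values taken from a specific model):
# new_stoich = {model.metabolites.ala__L_c: -0.05, model.metabolites.gly_c: -0.03}
# model = update_biomass(model, 'bof_c', new_stoich, 'biomass_c')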
#############################################
#### Simulate growth with all constraints####
#############################################
def figure_2LL(model):
## Run all 27 parameter combos and capture:
#### - growth rate
#### - biomass
import math as m
cols = ['ID','GR','mgDW','Cells']
data_out = pd.DataFrame(columns=cols)
Po_vals = ['mean','ub','lb']
a_star_vals = ['mean','ub','lb']
cell_mass = ['mean','ub','lb']
a_star_all = pd.read_csv('pt_a_star.csv',index_col=0,header=0)
rel = pd.read_csv('fluor_lamp.csv',header=0)
xsec = 80. #culture cross sectional area in cm2
path_len = 4.7 #cm
volume = 375.
time_interval = 20
for p in Po_vals:
for a in a_star_vals:
for c in cell_mass:
sample_id = p+'_'+a+'_'+c
model = model
if a == 'mean':
a_star = a_star_all['LL']
elif a == 'ub':
a_star = a_star_all['LL_UB']
else:
a_star = a_star_all['LL_LB']
if c == 'mean':
mgCell = 19.1/1e9
elif c == 'ub':
mgCell = 21.1/1e9
else:
mgCell = 17.1/1e9
innoc = 2.8e6*volume # cells/mL * total mL
iDW = mgCell*innoc # initial culture biomass
gDW = iDW
cells = (gDW/mgCell)
photon_usage = pd.DataFrame(data=[0,0],index = ['Delivered','Absorbed'],columns=['0'])
biomass = pd.DataFrame(data=[gDW,cells],index = ['Biomass','Cells'],columns=['0']) # add it up at the end of each simulation
o2_check = pd.DataFrame(data=[0],index = ['Max oxygen evolution'],columns=['0'])
for t in range(time_interval,1440+time_interval,time_interval): #One simulation every 20 minutes from t=0 to t=24 hrs (1440 min)
import math as m
interval_bm = 0 #initializes the total biomass
photon_count = 0
atten = np.zeros(len(a_star)) #captures light attenuation
tmp_o2_evo = 0
gDW = biomass[str(t-time_interval)]['Biomass'] #mg DW
cells = (gDW/mgCell)
if p == 'mean':
Ps = 2.94e-11 # umol O2 cell-1 s-1
alpha = 9.82e-2
beta = 0.0
resp = 1.83e-12
elif p == 'ub':
Ps = 3.06e-11 # umol O2 cell-1 s-1
alpha = 9.75e-2
beta = 0.0
resp = 1.75e-12
else:
Ps = 2.83e-11 # umol O2 cell-1 s-1
alpha = 9.91e-2
beta = 0.0
resp = 1.92e-12
irrad = 60.
# Photon_flux is the initial amount of light delivered to the culture at each wavelength from 400-700 nm
photon_flux = (rel['rel_height'].values)*irrad*xsec/10000*time_interval*60. #umol/(m2*s) * cm2 * 1m2/10000cm2 * 60s/min * min = umol photons/time interval
total_photons = sum(photon_flux)
photon_usage[str(t)]=[total_photons,0]
total_absorbed = 0.
ti_o2 = 0.
for nm in range(len(photon_flux)):
abs_coeff = a_star[400+nm] # a* value for the given nm (cm2/cell)
Io = photon_flux[nm] # incident photon flux at this nm at this slice (umol_photon/time_interval)
Ia = Io-Io*(m.exp(-1*abs_coeff*cells/volume*path_len))
nm_abs = Ia
total_absorbed = total_absorbed+nm_abs
conv_abs = total_absorbed/time_interval/60./(cells) # converts abs to O2 evo curve units umol/TI * TI/min * min/s * 1/cells => umol/(mgchla*s)
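                        # Platt-style P-I curve: gross O2 evolution saturates with absorbed light
                        # (alpha = initial slope, beta = photoinhibition, 0 here), minus respiration per cell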
slice_o2 = (Ps*(1-m.exp(-1*alpha*conv_abs/Ps))*(m.exp(-1*beta*conv_abs/Ps)))-resp #umol O2 cell-1 s-1
ti_o2 = ti_o2+(slice_o2*(cells)*60.*time_interval) #umol O2 cell-1 s-1 * cells * s/min * min/TI = umol O2/TI
o2_check[str(t)]=ti_o2
o2evo = ti_o2
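# Constrain net O2 exchange to 90-100% of the P-I-predicted evolution for this interval,
# so the FBA solution tracks the empirically parameterised O2 budget (fluxes treated as
# umol per time interval here).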
model.reactions.EX_o2_e.upper_bound = o2evo # o2evo
model.reactions.EX_o2_e.lower_bound = 0.9*o2evo#0. # <----Po Constraint
photon_usage[str(t)]['Absorbed']=total_absorbed
ngam = resp*60.*time_interval*cells
model.reactions.NGAM.lower_bound = ngam
cef_rate = 5.*gDW/60.*time_interval #CEF sets CEF_h upper bound. umol /(mgDW*h) * mgDW * h/60min * min/TI = umol/TI
model.reactions.CEF_h.upper_bound = cef_rate
model.reactions.EX_photon_e.lower_bound = total_absorbed*-1.
model.reactions.EX_photon_e.upper_bound = total_absorbed*-0.9999999
###### Parameters for PSII fluorescence
## Fv/Fm
FvFm_LL = 0.69
## Calculate Y(II) based on absorbed
abs_conv = total_absorbed/time_interval/60./(cells)
yII_LL = 0.7016*np.exp(-8.535e8*abs_conv)
# yII_LL2 = (2.77e16*(m.pow(abs_conv,2))-(2.51e8*abs_conv)+6.97e-1)
## Y(NPQ)
yNPQ_LL = 1./(1.+(np.power(2.3e-9/abs_conv,3.)))
if yNPQ_LL < 0:
yNPQ_LL = 0.
### Constraints
phoYII = round(yII_LL,2) # Y(II)
regNPQ = round(yNPQ_LL,2) # Y(NPQ)
regFvFm = round((1-FvFm_LL),2) # Photons lost upstream of PSII (1-Fv/Fm)
unrNPQ = round((1-phoYII-regNPQ-regFvFm),2) # Y(NO)
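# The four quantum-yield fractions partition every absorbed photon:
# phoYII + regNPQ + regFvFm + unrNPQ = 1, with Y(NO) computed as the remainder.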
### Edit model constraints with the appropriate values
## PHO_PSIIt_u
rxn = model.reactions.PHO_PSIIt_u
## reset the stoich
for met,s in rxn.metabolites.items():
stoich = s*-1
temp_dict = {met:stoich}
rxn.add_metabolites(temp_dict)
m1 = model.metabolites.photon_YII_u
m2 = model.metabolites.photon_YNPQ_u
m4 = model.metabolites.photon_YNO_u
m3 = model.metabolites.photon_h
m5 = model.metabolites.get_by_id('photon_1-FvFm_u')
rxn.add_metabolites({m1:phoYII,
m2:regNPQ,
m4:unrNPQ,
m5:regFvFm,
m3:-1.})
model.reactions.DM_photon_c.upper_bound = 0. # constrained <----hv Constraint
# Add D1 damage cost uncoupled to PSII
## If uncoupled, set the lower and upper bounds to the experimentally determined values
# Damage rates
D1_rate = 7e-6 # LL: umol D1 mgDW-1 h-1 <---- D1 Constraint
D1_rate = D1_rate * gDW/60. # umol D1 mgDW-1 h-1 * mgDW * 1 h/60 min = umol D1 min-1
D1_rate = D1_rate * time_interval # umol D1 min-1 * min/TI = umol D1/TI
model.reactions.NGAM_D1_u.lower_bound = D1_rate #
model.reactions.NGAM_D1_u.upper_bound = 1.0001*(D1_rate) #
## Solve the model
model.objective = 'bof_c'
solution = model.optimize()
if solution.status == 'optimal':
obj_rxn = model.reactions.bof_c
biomass[str(t)]=(gDW+obj_rxn.x,(gDW+obj_rxn.x)/(mgCell))
### collect data
if t == 1440:
dry_weight = biomass['1420']['Biomass']
cell_count = biomass['1420']['Cells']
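# Specific growth rate mu = ln(N_t/N_0)/t with t in hours (1440 min / 60 = 24 h), i.e. h-1.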
mu = np.log(biomass['1440']['Cells']/biomass['0']['Cells'])/(1440/60)
data_out = data_out.append({'ID':sample_id,'GR':mu,'mgDW':dry_weight,'Cells':cell_count},ignore_index=True)
return data_out
def figure_2HL(model):
## Run all 27 parameter combos and capture:
#### - growth rate
#### - biomass
import math as m
cols = ['ID','GR','mgDW','Cells']
data_out = pd.DataFrame(columns=cols)
Po_vals = ['mean','ub','lb']
a_star_vals = ['mean','ub','lb']
cell_mass = ['mean','ub','lb']
a_star_all = pd.read_csv('pt_a_star.csv',index_col=0,header=0)
rel = pd.read_csv('fluor_lamp.csv',header=0)
xsec = 80. #culture cross sectional area in cm2
path_len = 4.7 #cm
volume = 375.
time_interval = 20
for p in Po_vals:
for a in a_star_vals:
for c in cell_mass:
sample_id = p+'_'+a+'_'+c
model = model
if a == 'mean':
a_star = a_star_all['HL']
elif a == 'ub':
a_star = a_star_all['HL_UB']
else:
a_star = a_star_all['HL_LB']
if c == 'mean':
mgCell = 20.4/1e9
elif c == 'ub':
mgCell = 21.8/1e9
else:
mgCell = 19.0/1e9
innoc = 3.5e6*volume # cells/mL * total mL
iDW = mgCell*innoc # initial culture biomass
gDW = iDW
cells = (gDW/mgCell)
photon_usage = pd.DataFrame(data=[0,0],index = ['Delivered','Absorbed'],columns=['0'])
biomass = pd.DataFrame(data=[gDW,cells],index = ['Biomass','Cells'],columns=['0']) # add it up at the end of each simulation
o2_check = pd.DataFrame(data=[0],index = ['Max oxygen evolution'],columns=['0'])
for t in range(time_interval,720+time_interval,time_interval): #One simulation every 20 minutes from t=0 to t=12 hrs (720 min)
import math as m
interval_bm = 0 #initializes the total biomass
photon_count = 0
atten = np.zeros(len(a_star)) #captures light attenuation
tmp_o2_evo = 0
gDW = biomass[str(t-time_interval)]['Biomass'] #mg DW
cells = (gDW/mgCell)
if p == 'mean':
Ps = 2.46e-11 # umol O2 cell-1 s-1
alpha = 9.47e-2
beta = 0.0
resp = 4.72e-12
elif p == 'ub':
Ps = (2.54e-11) # umol O2 cell-1 s-1
alpha = (1.05e-1)
beta = 3.67e-5
resp = 4.28e-12
else:
Ps = (2.35e-11) # umol O2 cell-1 s-1
alpha = (8.5e-2)#
beta = 2.5e-10
resp = 5.15e-12
# Calculate photons and O2 uptake
irrad = 600. #uE
# Photon_flux is the initial amount of light delivered to the culture at each wavelength from 400-700 nm
photon_flux = (rel['rel_height'].values)*irrad*xsec/10000*time_interval*60. #umol/(m2*s) * cm2 * 1m2/10000cm2 * 60s/min * min = umol photons/time interval
total_photons = sum(photon_flux)
photon_usage[str(t)]=[total_photons,0]
total_absorbed = 0.
ti_o2 = 0.
for nm in range(len(photon_flux)):
abs_coeff = a_star[400+nm] # a* value for the given nm (cm2/cell)
Io = photon_flux[nm] # incident photon flux at this nm at this slice (umol_photon/time_interval)
Ia = Io-Io*(m.exp(-1*abs_coeff*cells/volume*path_len))
nm_abs = Ia
total_absorbed = total_absorbed+nm_abs
conv_abs = total_absorbed/time_interval/60./(cells) # converts absorbed photons to the O2-evolution curve's input units: umol/TI * TI/min * min/(60 s) * 1/cells => umol photons cell-1 s-1
slice_o2 = (Ps*(1-m.exp(-1*alpha*conv_abs/Ps))*(m.exp(-1*beta*conv_abs/Ps)))-resp #umol O2 cell-1 s-1
ti_o2 = ti_o2+(slice_o2*(cells)*60.*time_interval) #umol O2 cell-1 s-1 * cells * s/min * min/TI = umol O2/TI
o2_check[str(t)]=ti_o2
o2evo = ti_o2
model.reactions.EX_o2_e.upper_bound = o2evo #1000. #
model.reactions.EX_o2_e.lower_bound = 0.9*o2evo#-1000. #
photon_usage[str(t)]['Absorbed']=total_absorbed
ngam = resp*60.*time_interval*cells
model.reactions.AOX_m.lower_bound = ngam
cef_rate = 5.*gDW/60.*time_interval #CEF sets CEF_h upper bound. umol /(mgDW*h) * mgDW * h/60min * min/TI = umol/TI
model.reactions.CEF_h.upper_bound = cef_rate
model.reactions.EX_photon_e.lower_bound = total_absorbed*-1.
model.reactions.EX_photon_e.upper_bound = total_absorbed*-0.9999999
###### Parameters for PSII fluorescence
## Fv/Fm
FvFm_HL = 0.63
## Calculate Y(II) based on absorbed
abs_conv = total_absorbed/time_interval/60./(cells)
yII_HL = 0.6398*np.exp(-1.169e9*abs_conv)
## Y(NPQ)
yNPQ_HL = 1./(1.+(np.power(1.37e-9/abs_conv,5.5)))
if yNPQ_HL < 0:
yNPQ_HL = 0.
### Constraints
phoYII = round(yII_HL,2) # Y(II)
regNPQ = round(yNPQ_HL,2) # Y(NPQ)
regFvFm = round((1-FvFm_HL),2) # Photons lost upstream of PSII (1-Fv/Fm)
unrNPQ = round((1-phoYII-regNPQ-regFvFm),2) # Y(NO)
### Edit model constraints with the appropriate values
## Set Y(II) constraints
rxn = model.reactions.PHO_PSIIt_u
## reset the stoich
for met,s in rxn.metabolites.items():
stoich = s*-1
temp_dict = {met:stoich}
rxn.add_metabolites(temp_dict)
m1 = model.metabolites.photon_YII_u
m2 = model.metabolites.photon_YNPQ_u
m4 = model.metabolites.photon_YNO_u
m3 = model.metabolites.photon_h
m5 = model.metabolites.get_by_id('photon_1-FvFm_u')
rxn.add_metabolites({m1:phoYII,
m2:regNPQ,
m4:unrNPQ,
m5:regFvFm,
m3:-1.})
## Set photon constraints
model.reactions.DM_photon_c.upper_bound = 0. # constrained
# Add D1 damage cost uncoupled to PSII
# Damage rates
D1_rate = 2.52e-4 # HL: umol D1 mgDW-1 h-1 <---- D1 Constraint
D1_rate = D1_rate * gDW/60. # umol D1 mgDW-1 h-1 * mgDW * 1 h/60 min = umol D1 min-1
D1_rate = D1_rate * time_interval # umol D1 min-1 * min/TI = umol D1/TI
model.reactions.NGAM_D1_u.lower_bound = D1_rate #0.#
model.reactions.NGAM_D1_u.upper_bound = 1.0001*(D1_rate)#0.#
## Solve the model
model.objective = 'bof_c'
solution = model.optimize()
if solution.status == 'optimal':
obj_rxn = model.reactions.bof_c
biomass[str(t)]=(gDW+obj_rxn.x,(gDW+obj_rxn.x)/(mgCell))
### collect data
if t == 720:
dry_weight = biomass['700']['Biomass']
cell_count = biomass['700']['Cells']
mu = np.log(biomass['720']['Cells']/biomass['0']['Cells'])/(720/60)
data_out = data_out.append({'ID':sample_id,'GR':mu,'mgDW':dry_weight,'Cells':cell_count},ignore_index=True)
return data_out
def figure_2HL_HCO3(model):
## Run all 27 parameter combos and capture:
#### - growth rate
#### - biomass
import math as m
cols = ['ID','GR','mgDW','Cells']
data_out = pd.DataFrame(columns=cols)
Po_vals = ['mean','ub','lb']
a_star_vals = ['mean','ub','lb']
cell_mass = ['mean','ub','lb']
a_star_all = pd.read_csv('pt_a_star.csv',index_col=0,header=0)
rel = pd.read_csv('fluor_lamp.csv',header=0)
xsec = 80. #culture cross sectional area in cm2
path_len = 4.7 #cm
volume = 375.
time_interval = 20
for p in Po_vals:
for a in a_star_vals:
for c in cell_mass:
sample_id = p+'_'+a+'_'+c
model = model
if a == 'mean':
a_star = a_star_all['HL']
elif a == 'ub':
a_star = a_star_all['HL_UB']
else:
a_star = a_star_all['HL_LB']
if c == 'mean':
mgCell = 20.4/1e9
elif c == 'ub':
mgCell = 21.8/1e9
else:
mgCell = 19.0/1e9
innoc = 3.5e6*volume # cells/mL * total mL
iDW = mgCell*innoc # initial culture biomass
gDW = iDW
cells = (gDW/mgCell)
photon_usage = pd.DataFrame(data=[0,0],index = ['Delivered','Absorbed'],columns=['0'])
biomass = pd.DataFrame(data=[gDW,cells],index = ['Biomass','Cells'],columns=['0']) # add it up at the end of each simulation
o2_check = pd.DataFrame(data=[0],index = ['Max oxygen evolution'],columns=['0'])
for t in range(time_interval,720+time_interval,time_interval): #One simulation every 20 minutes from t=0 to t=12 hrs (720 min)
import math as m
interval_bm = 0 #initializes the total biomass
photon_count = 0
atten = np.zeros(len(a_star)) #captures light attenuation
tmp_o2_evo = 0
gDW = biomass[str(t-time_interval)]['Biomass'] #mg DW
cells = (gDW/mgCell)
if p == 'mean':
Ps = (2.46e-11)*1.15 # umol O2 cell-1 s-1
alpha = (9.47e-2)
beta = 0.0
resp = 4.72e-12
elif p == 'ub':
Ps = (2.54e-11)*1.15 # umol O2 cell-1 s-1
alpha = (1.05e-1)
beta = 3.67e-5
resp = 4.28e-12
else:
Ps = (2.35e-11)*1.15 # umol O2 cell-1 s-1
alpha = (8.5e-2)#
beta = 2.5e-10
resp = 5.15e-12
# Calculate photons and O2 uptake
irrad = 600. #uE
# Photon_flux is the initial amount of light delivered to the culture at each wavelength from 400-700 nm
photon_flux = (rel['rel_height'].values)*irrad*xsec/10000*time_interval*60. #umol/(m2*s) * cm2 * 1m2/10000cm2 * 60s/min * min = umol photons/time interval
total_photons = sum(photon_flux)
photon_usage[str(t)]=[total_photons,0]
total_absorbed = 0.
ti_o2 = 0.
for nm in range(len(photon_flux)):
abs_coeff = a_star[400+nm] # a* value for the given nm (cm2/cell)
Io = photon_flux[nm] # incident photon flux at this nm at this slice (umol_photon/time_interval)
Ia = Io-Io*(m.exp(-1*abs_coeff*cells/volume*path_len))
nm_abs = Ia
total_absorbed = total_absorbed+nm_abs
conv_abs = total_absorbed/time_interval/60./(cells) # converts absorbed photons to the O2-evolution curve's input units: umol/TI * TI/min * min/(60 s) * 1/cells => umol photons cell-1 s-1
slice_o2 = (Ps*(1-m.exp(-1*alpha*conv_abs/Ps))*(m.exp(-1*beta*conv_abs/Ps)))-resp #umol O2 cell-1 s-1
ti_o2 = ti_o2+(slice_o2*(cells)*60.*time_interval) #umol O2 cell-1 s-1 * cells * s/min * min/TI = umol O2/TI
o2_check[str(t)]=ti_o2
o2evo = ti_o2
model.reactions.EX_o2_e.upper_bound = o2evo #1000. #
model.reactions.EX_o2_e.lower_bound = 0.9*o2evo#-1000. #
photon_usage[str(t)]['Absorbed']=total_absorbed
ngam = resp*60.*time_interval*cells
model.reactions.AOX_m.lower_bound = ngam
cef_rate = 5.*gDW/60.*time_interval #CEF sets CEF_h upper bound. umol /(mgDW*h) * mgDW * h/60min * min/TI = umol/TI
model.reactions.CEF_h.upper_bound = cef_rate
model.reactions.EX_photon_e.lower_bound = total_absorbed*-1.
model.reactions.EX_photon_e.upper_bound = total_absorbed*-0.9999999
###### Parameters for PSII fluorescence
## Fv/Fm
FvFm_HL = 0.63
## Calculate Y(II) based on absorbed
abs_conv = total_absorbed/time_interval/60./(cells)
yII_HL = 0.6398*np.exp(-1.169e9*abs_conv)
## Y(NPQ)
yNPQ_HL = 1./(1.+(np.power(1.37e-9/abs_conv,5.5)))
if yNPQ_HL < 0:
yNPQ_HL = 0.
### Constraints
phoYII = round(yII_HL,2) # Y(II)
regNPQ = round(yNPQ_HL,2) # Y(NPQ)
regFvFm = round((1-FvFm_HL),2) # Photons lost upstream of PSII (1-Fv/Fm)
unrNPQ = round((1-phoYII-regNPQ-regFvFm),2) # Y(NO)
### Edit model constraints with the appropriate values
## Set Y(II) constraints
rxn = model.reactions.PHO_PSIIt_u
## reset the stoich
for met,s in rxn.metabolites.items():
stoich = s*-1
temp_dict = {met:stoich}
rxn.add_metabolites(temp_dict)
m1 = model.metabolites.photon_YII_u
m2 = model.metabolites.photon_YNPQ_u
m4 = model.metabolites.photon_YNO_u
m3 = model.metabolites.photon_h
m5 = model.metabolites.get_by_id('photon_1-FvFm_u')
rxn.add_metabolites({m1:phoYII,
m2:regNPQ,
m4:unrNPQ,
m5:regFvFm,
m3:-1.})
## Set photon constraints
model.reactions.DM_photon_c.upper_bound = 0. # constrained
# Add D1 damage cost uncoupled to PSII
# Damage rates
D1_rate = 2.52e-4 # HL: umol D1 mgDW-1 h-1 <---- D1 Constraint
D1_rate = D1_rate * gDW/60. # umol D1 mgDW-1 h-1 * mgDW * 1 h/60 min = umol D1 min-1
D1_rate = D1_rate * time_interval # umol D1 min-1 * min/TI = umol D1/TI
model.reactions.NGAM_D1_u.lower_bound = D1_rate #0.#
model.reactions.NGAM_D1_u.upper_bound = 1.0001*(D1_rate)#0.#
## Solve the model
model.objective = 'bof_c'
solution = model.optimize()
if solution.status == 'optimal':
obj_rxn = model.reactions.bof_c
biomass[str(t)]=(gDW+obj_rxn.x,(gDW+obj_rxn.x)/(mgCell))
### collect data
if t == 720:
dry_weight = biomass['700']['Biomass']
cell_count = biomass['700']['Cells']
mu = np.log(biomass['720']['Cells']/biomass['0']['Cells'])/(720/60)
data_out = data_out.append({'ID':sample_id,'GR':mu,'mgDW':dry_weight,'Cells':cell_count},ignore_index=True)
return data_out
###################################################
#### Simulate growth with variable constraints ####
###################################################
def simulate(model,light,photon_const,Po_const,YII_const,D1_const,DM20):
a_star = pd.read_csv('pt_a_star.csv',index_col=0,header=0)
a_star = a_star[light]
# read in the cool white lamp spectral density
rel = pd.read_csv('fluor_lamp.csv',header=0)
volume = 375.
if light == 'LL':
mgCell = 19.1/1e9 # mgDW per cell <-- LL +/- 2.0
innoc = 2.8e6*volume # cells/mL * total mL
duration = 1440 # total length of the simulation
#O2 rate equation
# input: umol photons cell-1 s-1
# Output: umol O2 cell-1 s-1
# Mean
Ps = 2.94e-11 # umol O2 cell-1 s-1
alpha = 9.82e-2
beta = 0.0
resp = 1.83e-12
# # upper bound
# Ps = 3.06e-11 # umol O2 cell-1 s-1
# alpha = 9.75e-2
# beta = 0.0
# resp = 1.75e-12
# # lower bound
# Ps = 2.83e-11 # umol O2 cell-1 s-1
# alpha = 9.91e-2
# beta = 0.0
# resp = 1.92e-12
irrad = 60.
if light == 'HL':
mgCell = 20.4/1e9 # mgDW per cell <-- HL Bounds 20.4+/- 1.4
innoc = 3.5e6*volume # cells/mL * total mL
duration = 720 # total length of the simulation
# # HL_bicarb
Ps = (2.46e-11)*1.15 # umol O2 cell-1 s-1
alpha = (9.47e-2)
beta = 0.0
resp = 4.72e-12
# #upper bound
# HL
# Ps = (2.54e-11) # umol O2 cell-1 s-1
# alpha = (1.05e-1)
# beta = 3.67e-5
# resp = 4.28e-12
# #lower bound
## HL
# Ps = (2.35e-11) # umol O2 cell-1 s-1
# alpha = (8.5e-2)#
# beta = 2.5e-10
# resp = 5.15e-12
irrad = 600. #uE
iDW = mgCell*innoc # initial culture biomass
### Cell count is mgDW/mgCell
xsec = 80. #culture cross sectional area in cm2
path_len = 4.7 #cm
time_interval = 20
gDW = iDW
cells = (gDW/mgCell)
#__________________________Initialize variables to collect output data___________________________________
# Initialize an unused photon count
photon_usage = pd.DataFrame(data=[0,0],index = ['Delivered','Absorbed'],columns=['0'])
biomass = | pd.DataFrame(data=[gDW,cells],index = ['Biomass','Cells'],columns=['0']) | pandas.DataFrame |
"""
@author: <NAME>
@email: <EMAIL>
this file augments the precomputed edit features with each article's word count and size, fetched via the Wikipedia API
"""
import pandas as pd
import requests
import sys
def page_search(session, title):
"""
:param session: HTTP session for the Wikipedia API
:param title: wiki article title to search for
:return: title, size, word_count, articleid
"""
url = "https://en.wikipedia.org/w/api.php"
PARAMS = {
"action": "query",
"format": "json",
"list": "search",
"srsearch": title
}
response = session.get(url=url, params=PARAMS).json()
query = response["query"]
search = query["search"]
title = None
if len(search) > 0:
title = search[0]["title"]
articleid = int(search[0]["pageid"])
size = int(search[0]["size"])
word_count = int(search[0]["wordcount"])
elif "suggestion" in query.get("searchinfo", []):
title = query["searchinfo"]["suggestion"]
size = 0
word_count = 0
articleid = 0
else:
print("title not found!")
print(title, query)
size, word_count, articleid = None, None, None
return title, size, word_count, articleid
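# Usage sketch (hypothetical title, not taken from the original script):
#   sess = requests.Session()
#   title, size, word_count, articleid = page_search(sess, "Some article title")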
def fetch_categories(session):
"""
:param session: http session for wiki api
:return: closure which fetches categories of article title
"""
def __fetch_categories(row):
URL = "https://en.wikipedia.org/w/api.php"
PARAMS = {
"action": "query",
"format": "json",
"prop": "categories",
"pageids": row["articleid"]
}
R = session.get(url=URL, params=PARAMS)
DATA = R.json()
PAGES = DATA["query"]["pages"]
for k, v in PAGES.items():
try:
if 'categories' in v:
for cat in v['categories']:
row["category"] = cat["title"]
print(row.articletitle, "==>", cat["title"])
break
except Exception as err:
print(err)
return row
return __fetch_categories
def DFS(session, article_title, size, word_count):
"""
:param session: http session for wiki api
:param article_title: title of wiki article
:param size: article size
:param word_count: word count
:return: DataFrame with article_title, size, word_count and articleid (empty if the title cannot be resolved)
"""
revisions_url = "https://en.wikipedia.org/w/api.php" #?action=query&prop=revisions&format=json&&rvlimit=5&formatversion=2".format(article_title)
PARAMS = {
"action": "query",
"format": "json",
"prop": "revisions",
"rvlimit": 5,
"formatversion":2,
"titles": article_title
}
diff_url = "http://en.wikipedia.org/w/index.php?diff={}&oldid={}"
response= session.get(url=revisions_url, params=PARAMS).json()
pages = response["query"]["pages"][0]
df = pd.DataFrame()
try:
revisions = pages["revisions"]
if size is not None and word_count is not None:
title, size, word_count, articleid = page_search(session, article_title)
except KeyError as err:
# HARD LUCK
# =================================
# spell mistakes of article title
# query wikipedia for corrected title
# print(pages)
# url = "https://en.wikipedia.org/w/api.php"
# PARAMS = {
# "action": "query",
# "format": "json",
# "list": "search",
# "srsearch": pages["title"]
# }
# response = session.get(url=url, params=PARAMS).json()
# query = response["query"]
# search = query["search"]
# title = None
# if len(search) > 0:
# title = search[0]["title"]
# elif "suggestion" in query.get("searchinfo",[]):
# title= query["searchinfo"]["suggestion"]
# else:
# print("title not found!")
# print(query)
# ====================================
title, size, word_count, articleid = page_search(session=session, title=pages["title"])
if title is None:
return df
print(pages["title"], " ==> ", title)
df = DFS(session, title, size, word_count)
return df
df = df.append({"size":size, "word_count":word_count, "article_title":title, "articleid": int(articleid)}, ignore_index=True)
return df
def augment_data():
"""
:return: filename of the CSV containing the augmented edits
"""
sess = requests.Session()
df = pd.DataFrame()
edits = pd.read_csv("edits.csv")
edits_df = edits
#.sample(n=10) # [["editid", "editor", "newrevisionid", "oldrevisionid", "editcomment", "diffurl", "articletitle"]]
try:
for index, row in edits_df.iterrows():
_df=DFS(sess,row["articletitle"], size=0, word_count=0)
if not _df.empty:
edits_df.loc[index, "size"] = _df["size"][0]
edits_df.loc[index, "word_count"] = _df["word_count"][0]
edits_df.loc[index, "articleid"] = _df["articleid"][0]
edits_df.loc[index, "articletitle"] = _df["article_title"][0]
else:
print("failed")
print(row["articletitle"])
filename = "augmented_with_size_wc.csv"
edits_df = edits_df.dropna()
edits_df.to_csv(filename, index=False)
print(edits_df.count())
except Exception as err:
filename = "incomplete.csv"
edits_df.to_csv(filename, index=False)
print(edits_df.count())
print(err)
sys.exit(0)
except KeyboardInterrupt as err:
filename = "incomplete.csv"
edits_df.to_csv(filename, index=False)
print(edits_df.count())
print(err)
sys.exit(0)
return filename
if __name__ == "__main__":
merged_data = | pd.read_csv("merged_augmented.csv") | pandas.read_csv |
import pandas as pd
def generate_train(playlists):
# define category range
cates = {'cat1': (10, 50), 'cat2': (10, 78), 'cat3': (10, 100), 'cat4': (40, 100), 'cat5': (40, 100),
'cat6': (40, 100),'cat7': (101, 250), 'cat8': (101, 250), 'cat9': (150, 250), 'cat10': (150, 250)}
cat_pids = {}
for cat, interval in cates.items():
df = playlists[(playlists['num_tracks'] >= interval[0]) & (playlists['num_tracks'] <= interval[1])].sample(
n=1000)
cat_pids[cat] = list(df.pid)
playlists = playlists.drop(df.index)
playlists = playlists.reset_index(drop=True)
return playlists, cat_pids
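# Usage sketch, assuming `playlists` is a DataFrame with 'pid' and 'num_tracks' columns:
#   train_playlists, cat_pids = generate_train(playlists)
# Each category keeps 1000 sampled pids for the test split; the remaining rows stay in training.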
def generate_test(cat_pids, playlists, interactions, tracks):
def build_df_none(cat_pids, playlists, cat, num_samples):
df = playlists[playlists['pid'].isin(cat_pids[cat])]
df = df[['pid', 'num_tracks']]
df['num_samples'] = num_samples
df['num_holdouts'] = df['num_tracks'] - df['num_samples']
return df
def build_df_name(cat_pids, playlists, cat, num_samples):
df = playlists[playlists['pid'].isin(cat_pids[cat])]
df = df[['name', 'pid', 'num_tracks']]
df['num_samples'] = num_samples
df['num_holdouts'] = df['num_tracks'] - df['num_samples']
return df
df_test_pl = pd.DataFrame()
df_test_itr = pd.DataFrame()
df_eval_itr = pd.DataFrame()
for cat in list(cat_pids.keys()):
if cat == 'cat1':
num_samples = 0
df = build_df_name(cat_pids, playlists, cat, num_samples)
df_test_pl = | pd.concat([df_test_pl, df]) | pandas.concat |
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 6 22:23:07 2020
@author: atidem
"""
import pandas as pd
import numpy as np
from statsmodels.tsa.holtwinters import ExponentialSmoothing
from statsmodels.tsa.ar_model import AR,ARResults
from statsmodels.tsa.arima_model import ARIMA,ARMA,ARIMAResults,ARMAResults
from pmdarima import auto_arima
from sklearn.metrics import mean_absolute_error,mean_squared_error
import matplotlib.pyplot as plt
from matplotlib.pyplot import show
import warnings
warnings.filterwarnings("ignore")
from math import sqrt
import matplotlib as mt
import statsmodels as st
import sklearn as sk
import worldometerDataHandler as handle
from statsmodels.tsa.statespace.sarimax import SARIMAX
#%% Notes
## default test rate 0.2, replaceable (AR, ARMA, ARIMA)
## worldometer link, replaceable
## model parameters have to be tuned separately for each country
## the Russian data raises a "did not converge" exception
## edited the library file "lib\site-packages\numpy\linalg\linalg.py" so the run keeps going when that exception occurs
#%%
resultsData = []
dataPosLenDeath = 0
dataPosLenCases = 0
#%% temp method
def runAllMethods():
#%% get data from worldometer's link
global dataPosLenCases,dataPosLenDeath
getData = handle.GetDataFromWorldometer(url)
df = getData.handleData()
#%%
dataLen = len(df)
#positivity
dataPosDeath = df[df.Deaths>0]
dataPosLenDeath = len(dataPosDeath)
dataPosCases = df[df.Cases>0]
dataPosLenCases = len(dataPosCases)
# size of predict(daily)
predDayCount = 30
# total range
totalIdx = pd.date_range(df.index[0],periods=dataLen+predDayCount,freq='D')
#df["Cases"][:pd.to_datetime("19.3.2020",format="%d.%m.%Y")]
#%% measure metrics
def mape(a,b):
mask = a != 0
return (np.fabs(a - b)/a)[mask].mean()
def mae(a,b):
return mean_absolute_error(a,b)
def rmse(a,b):
return sqrt(mean_squared_error(a,b))
#%% Holt Winters
"""
---Holt Winters---
alpha = smoothing_level
beta = smoothing_slope
gamma = smoothing_seasonal
phi = damping_slope
tren = mul , add
seasonal = mul , add
seasonal period
damped = True , False
TODO: add a user interface
"""
def holtWinters(data,alpha=None,beta=None,gamma=None,phi=None,tren=None,seasonal='add',period=None,damp=False):
dataPos = data[data>0]
dataPosLen = len(dataPos)
dataPos = pd.to_numeric(dataPos,downcast='float')
#print(dataPos)
pred = pd.DataFrame(index=totalIdx)
model = ExponentialSmoothing(dataPos[:dataPosLen],trend=tren,seasonal=seasonal,seasonal_periods=period,damped=damp)
pred["Fitted_Values"] = model.fit(smoothing_level=alpha,smoothing_slope=beta,smoothing_seasonal=gamma,damping_slope=phi).fittedvalues
pred["Predicted_Values"] = pd.Series(model.predict(model.params,start=df.index[-1],end=totalIdx[-1]),index=totalIdx[dataLen-1:])
return pred
## Holt Winters Prediction Section
## default values (alpha=None,beta=None,gamma=None,phi=None,tren=None,seasonal='add',period=None,damp=False)
Case_mul_mul = holtWinters(data=df.Cases,alpha=0.25,beta=0.25,gamma=0,tren='mul',seasonal='mul',period=dataPosLenCases-1,damp=True)
Case_mul_mul.rename(columns={"Fitted_Values":"Cases_hw_tes_mul-mul","Predicted_Values": "Cases_predict_hw_tes_mul"},inplace=True)
Case_add_add = holtWinters(data=df.Cases,alpha=0.9,beta=0.9,gamma=0,tren='add',seasonal='add',period=dataPosLenCases-1,damp=False)
Case_add_add.rename(columns={"Fitted_Values":"Cases_hw_tes_add-add","Predicted_Values": "Cases_predict_hw_tes_add"},inplace=True)
Death_mul_mul = holtWinters(data=df.Deaths,alpha=0.9,beta=0.9,gamma=0,tren='mul',seasonal='mul',period=dataPosLenDeath-1,damp=True)
Death_mul_mul.rename(columns={"Fitted_Values":"Deaths_hw_tes_mul","Predicted_Values": "Deaths_predict_hw_tes_mul"},inplace=True)
Death_add_add = holtWinters(data=df.Deaths,alpha=0.9,beta=0.9,gamma=0,tren='add',seasonal='add',period=dataPosLenDeath-1,damp=False)
Death_add_add.rename(columns={"Fitted_Values":"Deaths_hw_tes_add","Predicted_Values": "Deaths_predict_hw_tes_add"},inplace=True)
## merge prediction and main dataframe
finalDf = | pd.concat([df,Case_mul_mul,Case_add_add,Death_mul_mul,Death_add_add],axis=1) | pandas.concat |
import numpy as np
#import scipy.io #required to read Matlab *.mat file
from scipy import linalg
import pandas as pd
import networkx as nx
#import pickle
import itertools
from sklearn.covariance import GraphLassoCV, ledoit_wolf, graph_lasso
from statsmodels.stats.correlation_tools import cov_nearest
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.patches as patches
from matplotlib.ticker import MaxNLocator
import json
import glob
def X_from_excel(file, gmax=5):
""" Reads pedigree data and outputs a balanced table,
where rows are pedigrees and columns are lineal positions.
gmax is the maximum allowed number of generations"""
X_raw = pd.read_excel(file, index_col=[0,1])
X_raw = X_raw.unstack()
X_raw.columns = ['{}'.format(x[1]) for x in X_raw.columns]
""" Create list of all cell id's within given generations
Integer id's, e.g. [1,2,3,4,5,6,7]"""
gmin = 1 #always start at generation 1
cint = range(int(2**(gmin-1)),int(2**gmax))
"""Convert to binary id's: ['1','10','11','100','101','110','111']"""
cbin = [str(bin(i))[2:] for i in cint]
cells = cbin
"""Populate new df, which has a complete list of cell id's, w/ data"""
X = pd.DataFrame(index=X_raw.index, columns=cells)
X.update(X_raw) #fill nan's with data where available
return X
def MLEchordal(S,Morder=1):
"""Calculate Kmle from the marginals of cliques and separators in S.
S is the sample covariance. Morder is the order of the Markov chain."""
p = S.shape[0] #dimensionality of one side of S
idx = range(p) #list of variables
K_mle = np.zeros((p,p)) # initialize K
"""Markov order cannot be greater than dimensionality-1"""
if Morder > p-1:
Morder = p-1
"""Identify cliques and separators in a Markov chain"""
Cq = []; Sp = []
for i in range(0,p-Morder):
Cq.append(list(idx[i:i+1+Morder]))
if i > 0:
Sp.append(sorted(set(Cq[i-1]).intersection(set(Cq[i]))))
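# Worked example (assumption: p = 4, Morder = 1): cliques Cq = [[0,1], [1,2], [2,3]] and
# separators Sp = [[1], [2]]. K_mle then adds the inverted clique marginals and subtracts
# the inverted separator marginals, the standard decomposable-model MLE.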
""" Build K_mle from cliques and separators"""
# Add cliques
for c in Cq:
Scq = S[np.ix_(c,c)]
Kcq = np.linalg.pinv(Scq)
K_mle[np.ix_(c,c)] += Kcq
# Subtract separators
for s in Sp:
Ssp = S[np.ix_(s,s)]
Ksp = np.linalg.pinv(Ssp)
K_mle[np.ix_(s,s)] -= Ksp
cov_mle = np.linalg.pinv(K_mle)
#cov_mle = cov_mle.round(10) #remove bit noise to make symmetric
cov_mle = (cov_mle + cov_mle.T)/2 #symmetrize
return cov_mle, K_mle
""" Modified Cholesky decomposition"""
def LDL_decomp(A):
if not (A.T == A).all():
print('Matrix must be symmetric!')
print(A-A.T) #show to make sure asymmetry is just numerical error
A = (A + A.T)/2 #make it symmetric
C = np.linalg.cholesky(A)
d = np.diag(C)
D = np.diag(d**2)
dinv = np.diag(1/d)
L = C.dot(dinv)
return L, D
def Estep(X,params):
""" E-Step of the EM algorithm"""
n, p = X.shape
mu = params['mu']
cov = params['cov']
"""Sufficient statistics"""
x_cum = np.zeros(p) # cumulative sum of completed rows
xx_cum = np.zeros([p,p]) # cumulative sum of completed outer products
#ent_cum = 0 # cumulative entropy
#num_m_cum = 0 # cumulative number of missing elements
for i in range(n):
"""Partition cov into observed and missing quadrants"""
j_o = np.array(np.where(~np.isnan(X[i,:]))).flatten()
j_m = np.array(np.where(np.isnan(X[i,:]))).flatten()
num_m = len(j_m)
mu_o = mu[j_o]
mu_m = mu[j_m]
cov_oo = cov[np.ix_(j_o,j_o)] #np.ix_(a,b) = outer product <a,b>
cov_mm = cov[np.ix_(j_m,j_m)]
cov_om = cov[np.ix_(j_o,j_m)]
cov_mo = cov[np.ix_(j_m,j_o)]
xo = X[i,j_o] #observed values
if num_m == 0: #Simple when no missing data
x_cum += xo
xx_cum += np.outer(xo,xo)
else:
"""Expected x,xx conditioned on xo, P(xm|xo;params)"""
"""Expected vals of x and xx"""
xm_E = mu_m +cov_mo.dot(np.linalg.pinv(cov_oo)).dot(xo-mu_o)
x_all = np.empty(p)
x_all[j_o] = xo
x_all[j_m] = xm_E # use E(xm|xo;params) for missing elements
xx_all = np.outer(x_all,x_all) #need to correct xmxm block
"""Add residual covariance E(xm*xm|xo) to xmxm:
p.648 Eq (17.44) Hastie et al.
p.225, Eq (11.5) Little & Rubin"""
xmxm_E = cov_mm - cov_mo.dot(np.linalg.pinv(cov_oo)).dot(cov_om)
xx_all[np.ix_(j_m,j_m)] += xmxm_E
"""Cumulative sum over previous x and xx"""
x_cum += x_all
xx_cum += xx_all
#Non-constant terms of entropy of P(xm|xo;params) for LL
#ent_cum += 0.5*np.log(np.linalg.det(xx_mm_E))
#Increment cumulative number of missing elements
#num_m_cum += num_m
# Constant entropy term P(z|xo;params)
#ent_const = 0.5*num_m_cum*(1+np.log(2*np.pi))
"""Expected complete data log-likelihood"""
S = xx_cum - 2*np.outer(mu,x_cum) + n*np.outer(mu,mu) #Sample cov
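# Up to the additive constant -n*p/2*log(2*pi), the expected complete-data
# log-likelihood is ll = -(n/2)*log|cov| - (1/2)*tr(cov^{-1} S).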
ll = -0.5*(n*np.log(np.linalg.det(cov))+
np.trace(S.dot(np.linalg.pinv(cov))))
"""Compute log-likelihood"""
#ell = -0.5*n*np.log(np.linalg.det(2*np.pi*cov)) - \
# 0.5*np.trace(np.linalg.pinv(cov).dot(S))
#ll = ell #+ ent_cum + ent_const
"""Store sufficient statistics in dict"""
ss = {'xTot':x_cum, 'xxOuter':xx_cum, 'nExamples':n}
return ss,ll
class MVN:
""" Fits a Multivariate Gaussian Distribution from a dataset which may
have missing values by using the Expectation-Maximisation Algorithm
Code originally sourced from:
https://github.com/charlienash/mvn-missing-data/blob/master/mvn.py"""
def __init__(self, em_tol=1e-2, em_maxIter=1000, em_verbose=True):
self.em_tol = em_tol
self.em_maxIter = em_maxIter
self.em_isFitted = False
self.em_verbose = em_verbose
self.pattern = None #covariance pattern
self.label_map = None # mapping from covariance pattern to indices
#plt.rc('text', usetex=False)
#plt.rc('font', **{'family':'serif', 'serif':['Computer Modern Roman'],
# 'monospace': ['Computer Modern Typewriter']})
#import matplotlib as mpl
#mpl.rcParams.update(mpl.rcParamsDefault)
def create_F(self, gmax,plabels):
""" Creates the change-of-basis matrix. Each row is a basis vector.
Returned as a dataframe with indices and columns properly labeled. """
"""Create gmax Haar transform matrices and store in list"""
Nvars = 2**gmax-1
Hlist = [np.ones(1)[:,np.newaxis]] # Haar list, start with [[1]].
modes = [(1,1,1)] # List of mode names
dim_func = lambda i: 1 if i==1 else 2**(i-2) #dimensionality of irrep
for g in range(1,gmax):
Ng = Hlist[g-1].shape[0]
a = np.kron(Hlist[g-1], np.array([1.,1.]))
b = np.kron(np.eye(Ng),np.array([1.,-1.]))
Hlist.append(np.concatenate([a,b],axis=0))
for irrep in range(1,g+2):
for d in range(1,dim_func(irrep)+1):
modes.append((g+1,irrep,d)) #Hard-coded index ordering
"""Label modes: (Multiplicity, Irreducible, Dimension) """
modes = pd.MultiIndex.from_tuples(modes, names=['mul','irp','dim'])
"""Direct sum of H matrices for each gen."""
F = pd.DataFrame(linalg.block_diag(*Hlist),index=modes,columns=plabels)
"""Normalize each symmetry adapted basis vector (=row)"""
F = F.apply(lambda x: x/np.sqrt(x.dot(x)), axis=1)
""" Reorder indices in order of increasing index frequency """
F = F.reorder_levels(['irp','dim','mul'],axis=0)
"""Sort symmetry-adapted basis vectors by irp then by dim"""
F = F.sort_index(axis=0,level=['irp','dim']) #Sort by irp,dim
"""Set the \ell and \tau indices to start at 0 instead of 1."""
F.index = F.index.set_levels(F.index.levels[0]-1, level=0) #'irp' = \ell
F.index = F.index.set_levels(F.index.levels[1]-1, level=1) #'dim' = \tau
"""Aggregate the degenerate eigenspaces"""
#Q = F.groupby(axis=0,level=['irp','mul']).sum() #sum the degeneracies
Q = F.groupby(axis=0,level=['irp','mul']).mean() #avrge the degeneracies
return F,Q
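# Small worked example (assumption: gmax = 2, plabels = ['1', '10', '11']):
# Hlist = [[[1]], [[1, 1], [1, -1]]], so after the block direct sum and row normalisation
# F has rows [1, 0, 0], [0, 1/sqrt(2), 1/sqrt(2)] and [0, 1/sqrt(2), -1/sqrt(2)],
# indexed by the (irp, dim, mul) symmetry labels.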
def avPattern(self, dat, pmap):
""" Average together elements that have the same label.
Args
----
dat: array of data values
pmap: mapping from unique array elements to pooling indices
Returns
----
dat: array of pooled data (changes dat in place)
"""
for k in pmap:
if len(dat.shape) == 1:
q = pmap[k][0]
elif len(dat.shape) == 2:
q = pmap[k]
dat[q] = dat[q].mean()
return dat
def find_MLE(self, cov_emp):
""" Finds the penalised MLE given an empirical cov"""
""" No decomposition: find MLE of full cov matrix all at once """
if self.MLEalgo == 'glasso':
"""Convert cov_emp to correlation matrix """
sig_emp = np.sqrt(np.diag(cov_emp))
corr_emp = cov_emp / np.outer(sig_emp,sig_emp)
""" gLasso of patterned correlation matrix """
corr_MLE = graph_lasso(corr_emp,
alpha = self.alpha,
verbose = self.gl_verbose,
max_iter = self.gl_maxIter)[0] #Ignore prec_reg output, [1]
"""Convert back to covariance"""
cov_MLE = corr_MLE * np.outer(sig_emp,sig_emp)
""" Decomposition: Find MLE of orthogonal components then recombine"""
if self.MLEalgo == 'ortho_glasso' or self.MLEalgo == 'ortho_select':
"""Convert to symmetry-adapted basis"""
scov_emp = self.F.dot(cov_emp.dot(self.F.T)) #DataFrame type
scov_emp.columns = self.F.index #since df has no column index (hack)
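# In the symmetry-adapted basis a covariance that respects the pedigree symmetry is block
# diagonal, so each irreducible subspace can be regularised/estimated on its own and the
# result rotated back with F.T at the end of find_MLE.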
"""Initialise penalised spec_cov"""
scov_p = pd.DataFrame(0,columns=scov_emp.columns,
index=scov_emp.index)
"""Extract list of unique irreps"""
irp = scov_emp.index.get_level_values('irp').unique().tolist()
for i in irp:
"""Extract entire irreducible subspace"""
scov_iD=scov_emp.xs(key=i,level='irp',axis=0,drop_level=False)\
.xs(key=i,level='irp',axis=1,drop_level=False)
"""Get multiplicity of subspace"""
Nm_i = scov_iD.index.get_level_values('mul').unique().shape[0]
if Nm_i == 1: # No glasso if multiplicity is 1
scov_p.loc[scov_iD.index,scov_iD.index] = scov_iD
elif Nm_i > 1: # Use glasso
"""Get dimensionality of subspace"""
Nd_i=scov_iD.index.get_level_values('dim').unique().shape[0]
if Nd_i == 1: #Single block only
cv = scov_iD.values
sg = np.sqrt(np.diag(cv))
cr = cv / np.outer(sg,sg) #elementwise division
if self.MLEalgo == 'ortho_glasso':
""" Perform glasso optimization """
scov_p.loc[scov_iD.index,scov_iD.index] = \
graph_lasso(cr, alpha=self.alpha,
verbose=self.gl_verbose,
max_iter=self.gl_maxIter)[0] * np.outer(sg,sg)
if self.MLEalgo == 'ortho_select':
"""Perform covariance selection; explicit MLE"""
scov_p.loc[scov_iD.index,scov_iD.index] = \
MLEchordal(cv,Morder=self.Morder)[0]
elif Nd_i > 1: #Direct sum over repeated blocks
"""Extract 1st dimensions of irreps (unique params)"""
scov_i1 = scov_iD\
.xs(key=1,level='dim',axis=0,drop_level=False)\
.xs(key=1,level='dim',axis=1,drop_level=False)
"""glasso over 1st dimensions"""
cv = scov_i1.values
sg = np.sqrt(np.diag(cv))
cr = cv / np.outer(sg,sg)
if self.MLEalgo == 'ortho_glasso':
""" Perform glasso optimization """
scovp_tmp = graph_lasso(cr, alpha=self.alpha,
verbose=self.gl_verbose,
max_iter=self.gl_maxIter)[0] * np.outer(sg,sg)
if self.MLEalgo == 'ortho_select':
"""Perform covariance selection; explicit MLE"""
scovp_tmp = MLEchordal(cv,Morder=self.Morder)[0]
""" direct sum over dimensionality of subspace. Nd_i
is the number of repeated blocks in the irrep """
scov_p.loc[scov_iD.index,scov_iD.index] = \
linalg.block_diag(*[scovp_tmp]*Nd_i)
cov_MLE = self.F.T.dot(scov_p.dot(self.F)).values
return cov_MLE
def _mStep(self, ss):
""" M-Step of the EM-algorithm.
The M-step takes the expected sufficient statistics computed in the
E-step, and maximizes the expected complete data log-likelihood with
respect to the parameters.
Args
----
ss : dict
pattern: matrix of labels to identify unique covariance matrix elements
Returns
-------
params : dict
"""
mu_emp = 1/ss['nExamples'] * ss['xTot']
""" Pool mu elements according to pattern matrix """
if self.pattern == True:
mu_emp = self.avPattern(mu_emp, self.mu_map)
cov_emp = 1/ss['nExamples'] * (ss['xxOuter']) - np.outer(mu_emp,mu_emp)
""" Pool cov elements according to pattern matrix """
if self.pattern == True:
cov_emp = self.avPattern(cov_emp, self.cov_map)
"""Check that the covariance matrix is positive definite. If it is not
this usually spells doom for the iteration."""
if ~np.all(np.linalg.eigvals(cov_emp) > 0):
print('MStep: Matrix is NOT positive definite')
print('Replace cov with nearest positive definite matrix')
cov_emp = cov_nearest(cov_emp)
#cov_emp = (cov_emp + cov_emp.T)/2.
""" Find MLE for covariance """
cov_MLE = self.find_MLE(cov_emp)
"""Make sure that shared elements are equal (the glasso nearly preserves
their equality, just not exactly, so need to pool elements again)"""
#if self.pattern == True:
# self.avPattern(corr_p, self.cov_map)
#mu = mu_emp.copy()
#cov = cov_p.copy()
# Store params in dictionary
params = {
'mu' : mu_emp,
'cov' : cov_MLE
}
return params
def fit(self, dfX, pattern=None, paramsInit=None,
MLEalgo='ortho_select', alpha=0.0, Morder=1,
gl_maxIter=500, gl_verbose=False):
""" Fit the model using EM with data X.
Args
----
dfX : DataFrame, [nExamples, dataDim]
df of training data, where nExamples is the number of
examples and dataDim is the number of dimensions.
MLEalgo: algorithm for finding MLE
'glasso' - graphical lasso of observable covariance
'ortho_glasso' - glasso of orthogonal components
'ortho_select' - explicit MLE from covariance selection; this
requires setting the Markov order, Morder
alpha: regularisation parameter for glasso (0=no regularisation)
Morder: order of the Markov chain to use in covariance selection
"""
nExamples, dataDim = np.shape(dfX)
X = dfX.values.astype(float)
plabels = dfX.columns #name of each data variable
self.alpha = alpha
self.MLEalgo = MLEalgo
self.Morder = Morder
self.gl_maxIter = gl_maxIter
self.gl_verbose = gl_verbose
""" Load positions of shared parameters in mean and cov"""
if pattern is not None:
self.pattern = True
self.cov_pattern = pattern.values
self.mu_pattern = np.diag(pattern.values) #derived from cov_pattern
# Create dicts mapping unique cov element identifier to index:
# key=unique identifier; value=list of indices
pattern_to_map = lambda p: {i:np.where(p==i) for i in np.unique(p)}
self.cov_map = pattern_to_map(self.cov_pattern)
self.mu_map = pattern_to_map(self.mu_pattern)
""" Initial guess of mu and cov; required for missing data """
if paramsInit is None:
#params = {'mu' : np.zeros(dataDim),#np.random.normal(size=dataDim),
# 'cov' : np.eye(dataDim)}
mu00 = dfX.mean().mean() #global mean
mu0 = dfX.mean().values
mu0 = self.avPattern(mu0, self.mu_map) #pooled mean values
mu0[np.isnan(mu0)] = mu00 #remaining nan's set to global mean
var00 = dfX.var().mean() #global variance
var0 = dfX.var().values
var0 = self.avPattern(var0, self.mu_map) #pooled variance
var0[np.isnan(var0)] = var00 #remaining nan's set to global mean
cov0 = np.diag(var0)
params = {'mu':mu0, 'cov':cov0}
#mu0 = dfX.fillna(dfX.mean()).mean().values
#cov0 = dfX.fillna(dfX.mean()).cov().values
#params = {'mu': mu0, 'cov':cov0}
else:
params = paramsInit
""" Create change-of-basis matrix """
gens = [len(id) for id in plabels] #recover generation values
gmin, gmax = min(gens), max(gens)
self.F, self.Q = self.create_F(gmax,plabels) # Change-of-basis matrix
""" Initialise log-likelihood """
oldL = -np.inf
""" Iterate over E and M steps """
for i in range(self.em_maxIter):
"""E-Step"""
#ss.keys() = {'xTot', 'xxOuter', 'nExamples'}
""" WHAT IS THE DIFFERENCE BETWEEN Estep and _eStep??? """
ss, ll = Estep(X,params)
#ss, ll = self._eStep(X, params)
"""M-step"""
params = self._mStep(ss)
"""Evaluate likelihood"""
if self.em_verbose:
print("Iter {:d} NLL: {:.3f} Change: {:.3f}".format(i,
-ll, -(ll-oldL)), flush=True)
"""Break if change in likelihood is small"""
if np.abs(ll - oldL) < self.em_tol:
break
oldL = ll
else:
if self.em_verbose:
print("MVN did not converge within the specified tolerance." +
" You might want to increase the number of iterations.")
"""Mean, Covariance, Sigma """
mu = params['mu']
cov = params['cov']
sig = np.sqrt(np.diag(cov))
self.mu = | pd.Series(mu, index=plabels) | pandas.Series |
# -*- coding: utf-8 -*-
#!/usr/bin/env python
# Copyright 2015-2017, Institute for Systems Biology.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import pandas as pd
import re
from isb_cgc_user_data.utils.error_handling import UduException
def convert_file_to_dataframe(filepath_or_buffer, sep="\t", skiprows=0, rollover=False, nrows=None, header=None, logger=None):
"""does some required data cleaning and
then converts into a dataframe
"""
if logger:
logger.log_text("Converting file to a dataframe", severity='INFO')
try:
# items to change to NaN/NULL
# when you change something here, remember to change in clean_up_dataframe too.
na_values = ['none', 'None', 'NONE', 'null', 'Null', 'NULL', ' ', 'NA', '__UNKNOWN__', '?']
# read the table/file
# EXCEPTION THROWN: TOO MANY FIELDS THROWS CParserError e.g. "Expected 207 fields in line 3, saw 208"
# EXCEPTION THROWN: EMPTY FILE THROWS CParserError e.g. "Passed header=0 but only 0 lines in file"
data_df = pd.read_table(filepath_or_buffer, sep=sep, skiprows=skiprows, lineterminator='\n',
comment='#', na_values=na_values, dtype='object', nrows=nrows, header=header)
except Exception as exp:
if logger:
logger.log_text("Read Table Error: {0}".format(str(exp.message)), severity='ERROR')
user_message = None
pattern = re.compile('^.* error: (.*)$')
match = pattern.match(str(exp.message))
if match:
err_guts = match.group(1)
if err_guts:
user_message = "Error parsing file: {0}. ".format(err_guts[:400])
if not user_message:
if "0 lines in file" in str(exp.message):
user_message = "File was empty"
if not user_message:
user_message = "Parsing error reading file"
raise UduException(user_message)
finally:
filepath_or_buffer.close() # close StringIO
return data_df
#----------------------------------------
# Convert newline-delimited JSON string to dataframe
# -- should work for a small to medium files
# we are not loading into string, but into a temp file
# works only in a single bucket
#----------------------------------------
def convert_njson_file_to_df(filebuffer, logger=None):
"""Converting new-line delimited JSON file into dataframe"""
if logger:
logger.log_text("Converting new-line delimited JSON file into dataframe", severity='INFO')
# convert the file into a dataframe
lines = [json.loads(l) for l in filebuffer.splitlines()]
data_df = | pd.DataFrame(lines) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Fri May 20 14:09:31 2016
@author: bmanubay
"""
import cirpy
import numpy as np
import pandas as pd
from sklearn.externals.joblib import Memory
mem = Memory(cachedir="/home/bmanubay/.thermoml/")
@mem.cache
def resolve_cached(x, rtype):
return cirpy.resolve(x, rtype)
# Define list of all alkane SMILES strings that appear in all of our data
SMILES_alk = ['C', 'CC', 'CCC', 'CCCC', 'CCCCC', 'CCCCCC', 'CCCCCCC', 'CCCCCCCC', 'CCCCCCCCC', 'CCCCCCCCCC', 'CC(C)C', 'CCC(C)C', 'CCCC(C)C', 'C1CCCCC1', 'CC1CCCCC1', 'CCCCCC(C)C', 'CC(C)C(C)C', 'CCC(C)(C)C', 'CCC(C)CC', 'CCCC(C)C', 'CC(C)CC(C)(C)C', 'C1CCCC1', 'C1CCCCCCC1', 'CCC1CCCCC1', 'CC1CCC(C)C(C)C1', 'CCCCC1CCCCC1', 'CC1CCCC1', 'CCCCCCC(C)C', 'CCCCCCCC(C)C', 'CCCCC(C)CCC', 'CCC(C)CCC(C)CC', 'CCC(C)CC(C)CC', 'CCCCCC(C)C', 'C1CCCCCC1', 'CC(C)C(C)C', 'CCC(C)(C)C']
S = pd.DataFrame({'SMILES': SMILES_alk}, columns = ['SMILES'])
# Binary mixtures
aa1 = pd.read_csv("/home/bmanubay/.thermoml/tables/Ken/Binary/Property data/actcoeff_bin.csv", sep=';', index_col= 'Unnamed: 0')
aa2 = pd.read_csv("/home/bmanubay/.thermoml/tables/Ken/Binary/Property data/dens_bin.csv", sep=';', index_col= 'Unnamed: 0')
aa3 = pd.read_csv("/home/bmanubay/.thermoml/tables/Ken/Binary/Property data/dielec_bin.csv", sep=';', index_col= 'Unnamed: 0')
aa4 = pd.read_csv("/home/bmanubay/.thermoml/tables/Ken/Binary/Property data/eme_bin.csv", sep=';', index_col= 'Unnamed: 0')
aa5 = pd.read_csv("/home/bmanubay/.thermoml/tables/Ken/Binary/Property data/emcp_bin.csv", sep=';', index_col= 'Unnamed: 0')
aa6 = pd.read_csv("/home/bmanubay/.thermoml/tables/Ken/Binary/Property data/emv_bin.csv", sep=';', index_col= 'Unnamed: 0')
aa7 = pd.read_csv("/home/bmanubay/.thermoml/tables/Ken/Binary/Property data/sos_bin.csv", sep=';', index_col= 'Unnamed: 0')
# Binary Mixtures with alkane-alkane mixtures removed
cc1c = pd.concat([aa1["x1"], aa1["x2"], aa1['SMILES1'], aa1['SMILES2']], axis=1, keys = ["x1", "x2", "SMILES1", "SMILES2"])
ind = np.logical_not(cc1c.SMILES1.isin(S.SMILES) & cc1c.SMILES2.isin(S.SMILES))
cc1c = cc1c[ind]
cc1c = cc1c.drop(['x1','x2'], axis=1)
count1c = pd.Series(cc1c.squeeze().values.ravel()).value_counts()
count1c = count1c.reset_index()
count1c.rename(columns={"index":"SMILES",0:"Count"},inplace=True)
ccc1 = pd.concat([count1c["SMILES"], count1c["Count"]], axis=1, keys=["SMILES", "Count"])
cc2c = pd.concat([aa2["x1"], aa2["x2"], aa2['SMILES1'], aa2['SMILES2']], axis=1, keys = ["x1", "x2", "SMILES1", "SMILES2"])
ind = np.logical_not(cc2c.SMILES1.isin(S.SMILES) & cc2c.SMILES2.isin(S.SMILES))
cc2c = cc2c[ind]
cc2c = cc2c.drop(['x1','x2'], axis=1)
count2c = pd.Series(cc2c.squeeze().values.ravel()).value_counts()
count2c = count2c.reset_index()
count2c.rename(columns={"index":"SMILES",0:"Count"},inplace=True)
ccc2 = pd.concat([count2c["SMILES"], count2c["Count"]], axis=1, keys=["SMILES", "Count"])
cc3c = pd.concat([aa3["x1"], aa3["x2"], aa3['SMILES1'], aa3['SMILES2']], axis=1, keys = ["x1", "x2", "SMILES1", "SMILES2"])
ind = np.logical_not(cc3c.SMILES1.isin(S.SMILES) & cc3c.SMILES2.isin(S.SMILES))
cc3c = cc3c[ind]
cc3c = cc3c.drop(['x1','x2'], axis=1)
count3c = pd.Series(cc3c.squeeze().values.ravel()).value_counts()
count3c = count3c.reset_index()
count3c.rename(columns={"index":"SMILES",0:"Count"},inplace=True)
ccc3 = pd.concat([count3c["SMILES"], count3c["Count"]], axis=1, keys=["SMILES", "Count"])
cc4c = pd.concat([aa4["x1"], aa4["x2"], aa4['SMILES1'], aa4['SMILES2']], axis=1, keys = ["x1", "x2", "SMILES1", "SMILES2"])
ind = np.logical_not(cc4c.SMILES1.isin(S.SMILES) & cc4c.SMILES2.isin(S.SMILES))
cc4c = cc4c[ind]
cc4c = cc4c.drop(['x1','x2'], axis=1)
count4c = pd.Series(cc4c.squeeze().values.ravel()).value_counts()
count4c = count4c.reset_index()
count4c.rename(columns={"index":"SMILES",0:"Count"},inplace=True)
ccc4 = pd.concat([count4c["SMILES"], count4c["Count"]], axis=1, keys=["SMILES", "Count"])
cc5c = pd.concat([aa5["x1"], aa5["x2"], aa5['SMILES1'], aa5['SMILES2']], axis=1, keys = ["x1", "x2", "SMILES1", "SMILES2"])
ind = np.logical_not(cc5c.SMILES1.isin(S.SMILES) & cc5c.SMILES2.isin(S.SMILES))
cc5c = cc5c[ind]
cc5c = cc5c.drop(['x1','x2'], axis=1)
count5c = pd.Series(cc5c.squeeze().values.ravel()).value_counts()
count5c = count5c.reset_index()
count5c.rename(columns={"index":"SMILES",0:"Count"},inplace=True)
ccc5 = pd.concat([count5c["SMILES"], count5c["Count"]], axis=1, keys=["SMILES", "Count"])
cc6c = pd.concat([aa6["x1"], aa6["x2"], aa6['SMILES1'], aa6['SMILES2']], axis=1, keys = ["x1", "x2", "SMILES1", "SMILES2"])
ind = np.logical_not(cc6c.SMILES1.isin(S.SMILES) & cc6c.SMILES2.isin(S.SMILES))
cc6c = cc6c[ind]
cc6c = cc6c.drop(['x1','x2'], axis=1)
count6c = pd.Series(cc6c.squeeze().values.ravel()).value_counts()
count6c = count6c.reset_index()
count6c.rename(columns={"index":"SMILES",0:"Count"},inplace=True)
ccc6 = pd.concat([count6c["SMILES"], count6c["Count"]], axis=1, keys=["SMILES", "Count"])
cc7c = pd.concat([aa7["x1"], aa7["x2"], aa7['SMILES1'], aa7['SMILES2']], axis=1, keys = ["x1", "x2", "SMILES1", "SMILES2"])
ind = np.logical_not(cc7c.SMILES1.isin(S.SMILES) & cc7c.SMILES2.isin(S.SMILES))
cc7c = cc7c[ind]
cc7c = cc7c.drop(['x1','x2'], axis=1)
count7c = pd.Series(cc7c.squeeze().values.ravel()).value_counts()
count7c = count7c.reset_index()
count7c.rename(columns={"index":"SMILES",0:"Count"},inplace=True)
ccc7 = pd.concat([count7c["SMILES"], count7c["Count"]], axis=1, keys=["SMILES", "Count"])
# All data counts
c1 = pd.read_csv("/home/bmanubay/.thermoml/tables/Ken/Pure/Component counts/purecomp_counts_cpmol.csv", sep=';', usecols=['SMILES', 'Count'])
c2 = pd.read_csv("/home/bmanubay/.thermoml/tables/Ken/Pure/Component counts/purecomp_counts_dens.csv", sep=';', usecols=['SMILES', 'Count'])
c3 = pd.read_csv("/home/bmanubay/.thermoml/tables/Ken/Pure/Component counts/purecomp_counts_dielec.csv", sep=';', usecols=['SMILES', 'Count'])
c4 = pd.read_csv("/home/bmanubay/.thermoml/tables/Ken/Pure/Component counts/purecomp_counts_hmol.csv", sep=';', usecols=['SMILES', 'Count'])
c5 = pd.read_csv("/home/bmanubay/.thermoml/tables/Ken/Pure/Component counts/purecomp_counts_sos.csv", sep=';', usecols=['SMILES', 'Count'])
cc1 = pd.read_csv("/home/bmanubay/.thermoml/tables/Ken/Binary/Component counts/bincomp_counts_actcoeff.csv", sep=';', usecols=['SMILES', 'Count'])
cc2 = pd.read_csv("/home/bmanubay/.thermoml/tables/Ken/Binary/Component counts/bincomp_counts_dens.csv", sep=';', usecols=['SMILES', 'Count'])
cc3 = pd.read_csv("/home/bmanubay/.thermoml/tables/Ken/Binary/Component counts/bincomp_counts_dielec.csv", sep=';', usecols=['SMILES', 'Count'])
cc4 = pd.read_csv("/home/bmanubay/.thermoml/tables/Ken/Binary/Component counts/bincomp_counts_eme.csv", sep=';', usecols=['SMILES', 'Count'])
cc5 = pd.read_csv("/home/bmanubay/.thermoml/tables/Ken/Binary/Component counts/bincomp_counts_emcp.csv", sep=';', usecols=['SMILES', 'Count'])
cc6 = pd.read_csv("/home/bmanubay/.thermoml/tables/Ken/Binary/Component counts/bincomp_counts_emv.csv", sep=';', usecols=['SMILES', 'Count'])
cc7 = pd.read_csv("/home/bmanubay/.thermoml/tables/Ken/Binary/Component counts/bincomp_counts_sos.csv", sep=';', usecols=['SMILES', 'Count'])
gg1 = pd.read_csv("/home/bmanubay/.thermoml/tables/Ken/Binary/Mixture counts/mix_counts_actcoeff.csv", sep=';', index_col= 'Unnamed: 0')
gg2 = pd.read_csv("/home/bmanubay/.thermoml/tables/Ken/Binary/Mixture counts/mix_counts_dens.csv", sep=';', index_col= 'Unnamed: 0')
gg3 = pd.read_csv("/home/bmanubay/.thermoml/tables/Ken/Binary/Mixture counts/mix_counts_dielec.csv", sep=';', index_col= 'Unnamed: 0')
gg4 = pd.read_csv("/home/bmanubay/.thermoml/tables/Ken/Binary/Mixture counts/mix_counts_eme.csv", sep=';', index_col= 'Unnamed: 0')
gg5 = pd.read_csv("/home/bmanubay/.thermoml/tables/Ken/Binary/Mixture counts/mix_counts_emcp.csv", sep=';', index_col= 'Unnamed: 0')
gg6 = pd.read_csv("/home/bmanubay/.thermoml/tables/Ken/Binary/Mixture counts/mix_counts_emv.csv", sep=';', index_col= 'Unnamed: 0')
gg7 = pd.read_csv("/home/bmanubay/.thermoml/tables/Ken/Binary/Mixture counts/mix_counts_sos.csv", sep=';', index_col= 'Unnamed: 0')
a = c2.merge(c1,how='outer',on=['SMILES'],suffixes=(' dens pure', ' cpmol pure')).merge(c5,how='outer',on=['SMILES'],suffixes=(' cpmol pure', ' sos pure')).merge(c3,how='outer',on=['SMILES'],suffixes=(' sos pure', ' dielec pure')).merge(c4,how='outer',on=['SMILES'],suffixes=(' dielec pure', ' hmol pure'))
a.replace(np.nan,0,inplace=True)
a.rename(columns={'Count':'Count hmol pure'},inplace=True)
b = cc2.merge(cc1,how='outer',on=['SMILES'],suffixes=(' dens binary', ' actcoeff binary')).merge(cc7,how='outer',on=['SMILES'],suffixes=(' actcoeff binary', ' sos binary')).merge(cc3,how='outer',on=['SMILES'],suffixes=(' sos binary', ' dielec binary')).merge(cc4,how='outer',on=['SMILES'],suffixes=(' dielec binary', ' eme binary')).merge(cc5,how='outer',on=['SMILES'],suffixes=(' eme binary', ' emcp binary')).merge(cc6,how='outer',on=['SMILES'],suffixes=(' emcp binary', ' emv binary'))
b.replace(np.nan,0,inplace=True)
b.rename(columns={'Count':'Count emv binary'},inplace=True)
bb = ccc2.merge(ccc1,how='outer',on=['SMILES'],suffixes=(' dens binary', ' actcoeff binary')).merge(ccc7,how='outer',on=['SMILES'],suffixes=(' actcoeff binary', ' sos binary')).merge(ccc3,how='outer',on=['SMILES'],suffixes=(' sos binary', ' dielec binary')).merge(ccc4,how='outer',on=['SMILES'],suffixes=(' dielec binary', ' eme binary')).merge(ccc5,how='outer',on=['SMILES'],suffixes=(' eme binary', ' emcp binary')).merge(ccc6,how='outer',on=['SMILES'],suffixes=(' emcp binary', ' emv binary'))
bb.replace(np.nan,0,inplace=True)
bb.rename(columns={'Count':'Count emv binary'},inplace=True)
c = pd.merge(a,b,how='outer',on=['SMILES'])
c.replace(np.nan,0,inplace=True)
cc = | pd.merge(a,bb,how='outer',on=['SMILES']) | pandas.merge |
"""
Copyright 2019 <NAME>.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import datetime
import datetime as dt
import os
from typing import Union
import numpy as np
import pandas as pd
import pytest
import pytz
from gs_quant.target.common import XRef, PricingLocation, Currency as CurrEnum
from numpy.testing import assert_allclose
from pandas.testing import assert_series_equal
from pandas.tseries.offsets import CustomBusinessDay
from pytz import timezone
from testfixtures import Replacer
from testfixtures.mock import Mock
import gs_quant.timeseries.measures as tm
import gs_quant.timeseries.measures_rates as tm_rates
from gs_quant.api.gs.assets import GsTemporalXRef, GsAssetApi, GsIdType, IdList, GsAsset
from gs_quant.api.gs.data import GsDataApi, MarketDataResponseFrame
from gs_quant.api.gs.data import QueryType
from gs_quant.data.core import DataContext
from gs_quant.data.dataset import Dataset
from gs_quant.data.fields import Fields
from gs_quant.errors import MqError, MqValueError, MqTypeError
from gs_quant.markets.securities import AssetClass, Cross, Index, Currency, SecurityMaster, Stock, \
Swap, CommodityNaturalGasHub
from gs_quant.session import GsSession, Environment
from gs_quant.test.timeseries.utils import mock_request
from gs_quant.timeseries import Returns
from gs_quant.timeseries.measures import BenchmarkType
_index = [pd.Timestamp('2019-01-01')]
_test_datasets = ('TEST_DATASET',)
def mock_empty_market_data_response():
df = MarketDataResponseFrame()
df.dataset_ids = ()
return df
def map_identifiers_default_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "USD-LIBOR-BBA" in ids:
return {"USD-LIBOR-BBA": "MAPDB7QNB2TZVQ0E"}
elif "EUR-EURIBOR-TELERATE" in ids:
return {"EUR-EURIBOR-TELERATE": "MAJNQPFGN1EBDHAE"}
elif "GBP-LIBOR-BBA" in ids:
return {"GBP-LIBOR-BBA": "MAFYB8Z4R1377A19"}
elif "JPY-LIBOR-BBA" in ids:
return {"JPY-LIBOR-BBA": "MABMVE27EM8YZK33"}
elif "EUR OIS" in ids:
return {"EUR OIS": "MARFAGXDQRWM07Y2"}
def map_identifiers_swap_rate_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "USD-3m" in ids:
return {"USD-3m": "MAAXGV0GZTW4GFNC"}
elif "EUR-6m" in ids:
return {"EUR-6m": "MA5WM2QWRVMYKDK0"}
elif "KRW" in ids:
return {"KRW": 'MAJ6SEQH3GT0GA2Z'}
def map_identifiers_inflation_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "CPI-UKRPI" in ids:
return {"CPI-UKRPI": "MAQ7ND0MBP2AVVQW"}
elif "CPI-CPXTEMU" in ids:
return {"CPI-CPXTEMU": "MAK1FHKH5P5GJSHH"}
def map_identifiers_cross_basis_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "USD-3m/JPY-3m" in ids:
return {"USD-3m/JPY-3m": "MA99N6C1KF9078NM"}
elif "EUR-3m/USD-3m" in ids:
return {"EUR-3m/USD-3m": "MAXPKTXW2D4X6MFQ"}
elif "GBP-3m/USD-3m" in ids:
return {"GBP-3m/USD-3m": "MA8BZHQV3W32V63B"}
def get_data_policy_rate_expectation_mocker(
start: Union[dt.date, dt.datetime] = None,
end: Union[dt.date, dt.datetime] = None,
as_of: dt.datetime = None,
since: dt.datetime = None,
fields: Union[str, Fields] = None,
asset_id_type: str = None,
**kwargs) -> pd.DataFrame:
if 'meetingNumber' in kwargs:
if kwargs['meetingNumber'] == 0:
return mock_meeting_spot()
elif 'meeting_date' in kwargs:
if kwargs['meeting_date'] == dt.date(2019, 10, 24):
return mock_meeting_spot()
return mock_meeting_expectation()
def test_parse_meeting_date():
assert tm.parse_meeting_date(5) == ''
assert tm.parse_meeting_date('') == ''
assert tm.parse_meeting_date('test') == ''
assert tm.parse_meeting_date('2019-09-01') == dt.date(2019, 9, 1)
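# The tests below patch the GS session and GsAssetApi.map_identifiers, then check
# that asset IDs resolve to the expected benchmark, swap-rate or inflation assets.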
def test_currency_to_default_benchmark_rate(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_default_mocker)
asset_id_list = ["MAZ7RWC904JYHYPS", "MAJNQPFGN1EBDHAE", "MA66CZBQJST05XKG", "MAK1FHKH5P5GJSHH", "MA4J1YB8XZP2BPT8",
"MA4B66MW5E27U8P32SB"]
correct_mapping = ["MAPDB7QNB2TZVQ0E", "MAJNQPFGN1EBDHAE", "MAFYB8Z4R1377A19", "MABMVE27EM8YZK33",
"MA4J1YB8XZP2BPT8", "MA4B66MW5E27U8P32SB"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.currency_to_default_benchmark_rate(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_currency_to_default_swap_rate_asset(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_swap_rate_mocker)
asset_id_list = ['MAZ7RWC904JYHYPS', 'MAJNQPFGN1EBDHAE', 'MAJ6SEQH3GT0GA2Z']
correct_mapping = ['MAAXGV0GZTW4GFNC', 'MA5WM2QWRVMYKDK0', 'MAJ6SEQH3GT0GA2Z']
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.currency_to_default_swap_rate_asset(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_currency_to_inflation_benchmark_rate(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_inflation_mocker)
asset_id_list = ["MA66CZBQJST05XKG", "MAK1FHKH5P5GJSHH", "MA4J1YB8XZP2BPT8"]
correct_mapping = ["MAQ7ND0MBP2AVVQW", "MAK1FHKH5P5GJSHH", "MA4J1YB8XZP2BPT8"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.currency_to_inflation_benchmark_rate(asset_id_list[i])
assert correct_id == correct_mapping[i]
# Test that the same id is returned when a TypeError is raised
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=TypeError('Test'))
assert tm.currency_to_inflation_benchmark_rate('MA66CZBQJST05XKG') == 'MA66CZBQJST05XKG'
def test_cross_to_basis(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_cross_basis_mocker)
asset_id_list = ["MAYJPCVVF2RWXCES", "MA4B66MW5E27U8P32SB", "nobbid"]
correct_mapping = ["MA99N6C1KF9078NM", "MA4B66MW5E27U8P32SB", "nobbid"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.cross_to_basis(asset_id_list[i])
assert correct_id == correct_mapping[i]
# Test that the same id is returned when a TypeError is raised
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=TypeError('Test'))
assert tm.cross_to_basis('MAYJPCVVF2RWXCES') == 'MAYJPCVVF2RWXCES'
def test_currency_to_tdapi_swap_rate_asset(mocker):
replace = Replacer()
mocker.patch.object(GsSession.__class__, 'current',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=mock_request)
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
with tm.PricingContext(dt.date.today()):
asset = Currency('MA25DW5ZGC1BSC8Y', 'NOK')
bbid_mock.return_value = 'NOK'
        correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
        assert 'MA25DW5ZGC1BSC8Y' == correct_id  # unsupported currency falls back to the asset's own ID
asset = Currency('MAZ7RWC904JYHYPS', 'USD')
bbid_mock.return_value = 'USD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAFRSWPAF5QPNTP2' == correct_id
bbid_mock.return_value = 'CHF'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAW25BGQJH9P6DPT' == correct_id
bbid_mock.return_value = 'EUR'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAA9MVX15AJNQCVG' == correct_id
bbid_mock.return_value = 'GBP'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA6QCAP9B7ABS9HA' == correct_id
bbid_mock.return_value = 'JPY'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAEE219J5ZP0ZKRK' == correct_id
bbid_mock.return_value = 'SEK'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAETMVTPNP3199A5' == correct_id
bbid_mock.return_value = 'HKD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MABRNGY8XRFVC36N' == correct_id
bbid_mock.return_value = 'NZD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAH16NHE1HBN0FBZ' == correct_id
bbid_mock.return_value = 'AUD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAY8147CRK0ZP53B' == correct_id
bbid_mock.return_value = 'CAD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MANJ8SS88WJ6N28Q' == correct_id
bbid_mock.return_value = 'KRW'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAP55AXG5SQVS6C5' == correct_id
bbid_mock.return_value = 'INR'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA20JHJXN1PD5HGE' == correct_id
bbid_mock.return_value = 'CNY'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA4K1D8HH2R0RQY5' == correct_id
bbid_mock.return_value = 'SGD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA5CQFHYBPH9E5BS' == correct_id
bbid_mock.return_value = 'DKK'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAF131NKWVRESFYA' == correct_id
asset = Currency('MA890', 'PLN')
bbid_mock.return_value = 'PLN'
assert 'MA890' == tm_rates._currency_to_tdapi_swap_rate_asset(asset)
replace.restore()
def test_currency_to_tdapi_basis_swap_rate_asset(mocker):
replace = Replacer()
mocker.patch.object(GsSession.__class__, 'current',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=mock_request)
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
with tm.PricingContext(dt.date.today()):
asset = Currency('MA890', 'NOK')
bbid_mock.return_value = 'NOK'
assert 'MA890' == tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
asset = Currency('MAZ7RWC904JYHYPS', 'USD')
bbid_mock.return_value = 'USD'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAQB1PGEJFCET3GG' == correct_id
bbid_mock.return_value = 'EUR'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAGRG2VT11GQ2RQ9' == correct_id
bbid_mock.return_value = 'GBP'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAHCYNB3V75JC5Q8' == correct_id
bbid_mock.return_value = 'JPY'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAXVRBEZCJVH0C4V' == correct_id
replace.restore()
def test_check_clearing_house():
assert tm_rates._ClearingHouse.LCH == tm_rates._check_clearing_house('lch')
assert tm_rates._ClearingHouse.CME == tm_rates._check_clearing_house(tm_rates._ClearingHouse.CME)
assert tm_rates._ClearingHouse.LCH == tm_rates._check_clearing_house(None)
invalid_ch = ['NYSE']
for ch in invalid_ch:
with pytest.raises(MqError):
tm_rates._check_clearing_house(ch)
def test_get_swap_csa_terms():
euribor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EURIBOR.value]
usd_libor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.LIBOR.value]
fed_funds_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.Fed_Funds.value]
estr_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EUROSTR.value]
assert dict(csaTerms='USD-1') == tm_rates._get_swap_csa_terms('USD', fed_funds_index)
assert dict(csaTerms='EUR-EuroSTR') == tm_rates._get_swap_csa_terms('EUR', estr_index)
assert {} == tm_rates._get_swap_csa_terms('EUR', euribor_index)
assert {} == tm_rates._get_swap_csa_terms('USD', usd_libor_index)
def test_get_basis_swap_csa_terms():
euribor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EURIBOR.value]
usd_libor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.LIBOR.value]
fed_funds_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.Fed_Funds.value]
sofr_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.SOFR.value]
estr_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EUROSTR.value]
eonia_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EONIA.value]
assert dict(csaTerms='USD-1') == tm_rates._get_basis_swap_csa_terms('USD', fed_funds_index, sofr_index)
assert dict(csaTerms='EUR-EuroSTR') == tm_rates._get_basis_swap_csa_terms('EUR', estr_index, eonia_index)
assert {} == tm_rates._get_basis_swap_csa_terms('EUR', eonia_index, euribor_index)
assert {} == tm_rates._get_basis_swap_csa_terms('USD', fed_funds_index, usd_libor_index)
def test_match_floating_tenors():
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['SOFR'],
asset_parameters_receiver_designated_maturity='1y')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_receiver_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['SOFR'],
asset_parameters_payer_designated_maturity='1y',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_receiver_designated_maturity='3m')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_payer_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['SONIA'],
asset_parameters_payer_designated_maturity='1y',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['LIBOR'],
asset_parameters_receiver_designated_maturity='3m')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_payer_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['LIBOR'],
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['SONIA'],
asset_parameters_receiver_designated_maturity='1y')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_receiver_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_receiver_designated_maturity='6m')
assert swap_args == tm_rates._match_floating_tenors(swap_args)
def test_get_term_struct_date(mocker):
today = datetime.datetime.today()
biz_day = CustomBusinessDay()
assert today == tm_rates._get_term_struct_date(tenor=today, index=today, business_day=biz_day)
date_index = datetime.datetime(2020, 7, 31, 0, 0)
assert date_index == tm_rates._get_term_struct_date(tenor='2020-07-31', index=date_index, business_day=biz_day)
assert date_index == tm_rates._get_term_struct_date(tenor='0b', index=date_index, business_day=biz_day)
assert datetime.datetime(2021, 7, 30, 0, 0) == tm_rates._get_term_struct_date(tenor='1y', index=date_index,
business_day=biz_day)
def test_cross_stored_direction_for_fx_vol(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
asset_id_list = ["MAYJPCVVF2RWXCES", "MATGYV0J9MPX534Z"]
correct_mapping = ["MATGYV0J9MPX534Z", "MATGYV0J9MPX534Z"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.cross_stored_direction_for_fx_vol(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_cross_to_usd_based_cross_for_fx_forecast(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
asset_id_list = ["MAYJPCVVF2RWXCES", "MATGYV0J9MPX534Z"]
correct_mapping = ["MATGYV0J9MPX534Z", "MATGYV0J9MPX534Z"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.cross_to_usd_based_cross(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_cross_to_used_based_cross(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=TypeError('unsupported'))
replace = Replacer()
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'HELLO'
assert 'FUN' == tm.cross_to_usd_based_cross(Cross('FUN', 'EURUSD'))
replace.restore()
def test_cross_stored_direction(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=TypeError('unsupported'))
replace = Replacer()
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'HELLO'
assert 'FUN' == tm.cross_stored_direction_for_fx_vol(Cross('FUN', 'EURUSD'))
replace.restore()
def test_get_tdapi_rates_assets(mocker):
mock_asset_1 = GsAsset(asset_class='Rate', id='MAW25BGQJH9P6DPT', type_='Swap', name='Test_asset')
mock_asset_2 = GsAsset(asset_class='Rate', id='MAA9MVX15AJNQCVG', type_='Swap', name='Test_asset')
mock_asset_3 = GsAsset(asset_class='Rate', id='MANQHVYC30AZFT7R', type_='BasisSwap', name='Test_asset')
replace = Replacer()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1]
assert 'MAW25BGQJH9P6DPT' == tm_rates._get_tdapi_rates_assets()
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1, mock_asset_2]
kwargs = dict(asset_parameters_termination_date='10y', asset_parameters_effective_date='0b')
with pytest.raises(MqValueError):
tm_rates._get_tdapi_rates_assets(**kwargs)
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = []
with pytest.raises(MqValueError):
tm_rates._get_tdapi_rates_assets()
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1, mock_asset_2]
kwargs = dict()
assert ['MAW25BGQJH9P6DPT', 'MAA9MVX15AJNQCVG'] == tm_rates._get_tdapi_rates_assets(**kwargs)
replace.restore()
    # This case checks that the SOFR leg maturity is matched to the LIBOR leg and that the legs are flipped to find the right asset
kwargs = dict(type='BasisSwap', asset_parameters_termination_date='10y',
asset_parameters_payer_rate_option=BenchmarkType.LIBOR,
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=BenchmarkType.SOFR,
asset_parameters_receiver_designated_maturity='1y',
asset_parameters_clearing_house='lch', asset_parameters_effective_date='Spot',
asset_parameters_notional_currency='USD',
pricing_location='NYC')
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_3]
assert 'MANQHVYC30AZFT7R' == tm_rates._get_tdapi_rates_assets(**kwargs)
replace.restore()
def test_get_swap_leg_defaults():
result_dict = dict(currency=CurrEnum.JPY, benchmark_type='JPY-LIBOR-BBA', floating_rate_tenor='6m',
pricing_location=PricingLocation.TKO)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.JPY)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.USD, benchmark_type='USD-LIBOR-BBA', floating_rate_tenor='3m',
pricing_location=PricingLocation.NYC)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.USD)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.EUR, benchmark_type='EUR-EURIBOR-TELERATE', floating_rate_tenor='6m',
pricing_location=PricingLocation.LDN)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.EUR)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.SEK, benchmark_type='SEK-STIBOR-SIDE', floating_rate_tenor='6m',
pricing_location=PricingLocation.LDN)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.SEK)
assert result_dict == defaults
def test_check_forward_tenor():
valid_tenors = [datetime.date(2020, 1, 1), '1y', 'imm2', 'frb2', '1m', '0b']
for tenor in valid_tenors:
assert tenor == tm_rates._check_forward_tenor(tenor)
invalid_tenors = ['5yr', 'imm5', 'frb0']
for tenor in invalid_tenors:
with pytest.raises(MqError):
tm_rates._check_forward_tenor(tenor)
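# Mock market-data responses used to stub GsDataApi.get_market_data (or Dataset.get_data)
# in the measure tests further down.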
def mock_commod(_cls, _q):
d = {
'price': [30, 30, 30, 30, 35.929686, 35.636039, 27.307498, 23.23177, 19.020833, 18.827291, 17.823749, 17.393958,
17.824999, 20.307603, 24.311249, 25.160103, 25.245728, 25.736873, 28.425206, 28.779789, 30.519996,
34.896348, 33.966973, 33.95489, 33.686348, 34.840307, 32.674163, 30.261665, 30, 30, 30]
}
df = MarketDataResponseFrame(data=d, index=pd.date_range('2019-05-01', periods=31, freq='H', tz=timezone('UTC')))
df.dataset_ids = _test_datasets
return df
def mock_forward_price(_cls, _q):
d = {
'forwardPrice': [
22.0039,
24.8436,
24.8436,
11.9882,
14.0188,
11.6311,
18.9234,
21.3654,
21.3654,
],
'quantityBucket': [
"PEAK",
"PEAK",
"PEAK",
"7X8",
"7X8",
"7X8",
"2X16H",
"2X16H",
"2X16H",
],
'contract': [
"J20",
"K20",
"M20",
"J20",
"K20",
"M20",
"J20",
"K20",
"M20",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 9))
df.dataset_ids = _test_datasets
return df
def mock_fair_price(_cls, _q):
d = {
'fairPrice': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
def mock_natgas_forward_price(_cls, _q):
d = {
'forwardPrice': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
def mock_fair_price_swap(_cls, _q):
d = {'fairPrice': [2.880]}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)]))
df.dataset_ids = _test_datasets
return df
def mock_implied_volatility(_cls, _q):
d = {
'impliedVolatility': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
def mock_missing_bucket_forward_price(_cls, _q):
d = {
'forwardPrice': [
22.0039,
24.8436,
24.8436,
11.9882,
14.0188,
18.9234,
21.3654,
21.3654,
],
'quantityBucket': [
"PEAK",
"PEAK",
"PEAK",
"7X8",
"7X8",
"2X16H",
"2X16H",
"2X16H",
],
'contract': [
"J20",
"K20",
"M20",
"J20",
"K20",
"J20",
"K20",
"M20",
]
}
return pd.DataFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 8))
def mock_fx_vol(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame({'impliedVolatility': [3]}, index=[pd.Timestamp('2019-01-04T12:00:00Z')])
d = {
'strikeReference': ['delta', 'spot', 'forward'],
'relativeStrike': [25, 100, 100],
'impliedVolatility': [5, 1, 2],
'forecast': [1.1, 1.1, 1.1]
}
df = MarketDataResponseFrame(data=d, index=pd.date_range('2019-01-01', periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_fx_forecast(_cls, _q):
d = {
'fxForecast': [1.1, 1.1, 1.1]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_fx_delta(_cls, _q):
d = {
'relativeStrike': [25, -25, 0],
'impliedVolatility': [1, 5, 2],
'forecast': [1.1, 1.1, 1.1]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_fx_empty(_cls, _q):
d = {
'strikeReference': [],
'relativeStrike': [],
'impliedVolatility': []
}
df = MarketDataResponseFrame(data=d, index=[])
df.dataset_ids = _test_datasets
return df
def mock_fx_switch(_cls, _q, _n):
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_empty)
replace.restore()
return Cross('MA1889', 'ABC/XYZ')
def mock_curr(_cls, _q):
d = {
'swapAnnuity': [1, 2, 3],
'swapRate': [1, 2, 3],
'basisSwapRate': [1, 2, 3],
'swaptionVol': [1, 2, 3],
'atmFwdRate': [1, 2, 3],
'midcurveVol': [1, 2, 3],
'capFloorVol': [1, 2, 3],
'spreadOptionVol': [1, 2, 3],
'inflationSwapRate': [1, 2, 3],
'midcurveAtmFwdRate': [1, 2, 3],
'capFloorAtmFwdRate': [1, 2, 3],
'spreadOptionAtmFwdRate': [1, 2, 3],
'strike': [0.25, 0.5, 0.75]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_cross(_cls, _q):
d = {
'basis': [1, 2, 3],
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_eq(_cls, _q):
d = {
'relativeStrike': [0.75, 0.25, 0.5],
'impliedVolatility': [5, 1, 2],
'impliedCorrelation': [5, 1, 2],
'realizedCorrelation': [3.14, 2.71828, 1.44],
'averageImpliedVolatility': [5, 1, 2],
'averageImpliedVariance': [5, 1, 2],
'averageRealizedVolatility': [5, 1, 2],
'impliedVolatilityByDeltaStrike': [5, 1, 2],
'fundamentalMetric': [5, 1, 2]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_eq_vol(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
idx = [pd.Timestamp(datetime.datetime.now(pytz.UTC))]
return MarketDataResponseFrame({'impliedVolatility': [3]}, index=idx)
d = {
'impliedVolatility': [5, 1, 2],
}
end = datetime.datetime.now(pytz.UTC).date() - datetime.timedelta(days=1)
df = MarketDataResponseFrame(data=d, index=pd.date_range(end=end, periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_eq_vol_last_err(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
raise MqValueError('error while getting last')
d = {
'impliedVolatility': [5, 1, 2],
}
end = datetime.date.today() - datetime.timedelta(days=1)
df = MarketDataResponseFrame(data=d, index=pd.date_range(end=end, periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_eq_vol_last_empty(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame()
d = {
'impliedVolatility': [5, 1, 2],
}
end = datetime.date.today() - datetime.timedelta(days=1)
df = MarketDataResponseFrame(data=d, index=pd.date_range(end=end, periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_eq_norm(_cls, _q):
d = {
'relativeStrike': [-4.0, 4.0, 0],
'impliedVolatility': [5, 1, 2]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_eq_spot(_cls, _q):
d = {
'relativeStrike': [0.75, 1.25, 1.0],
'impliedVolatility': [5, 1, 2]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_inc(_cls, _q):
d = {
'relativeStrike': [0.25, 0.75],
'impliedVolatility': [5, 1]
}
df = MarketDataResponseFrame(data=d, index=_index * 2)
df.dataset_ids = _test_datasets
return df
def mock_meeting_expectation():
data_dict = MarketDataResponseFrame({'date': [dt.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2'],
'location': ['NYC'],
'rateType': ['Meeting Forward'],
'startingDate': [dt.date(2020, 1, 29)],
'endingDate': [dt.date(2020, 1, 29)],
'meetingNumber': [2],
'valuationDate': [dt.date(2019, 12, 6)],
'meetingDate': [dt.date(2020, 1, 23)],
'value': [-0.004550907771]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_meeting_spot():
data_dict = MarketDataResponseFrame({'date': [dt.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2'],
'location': ['NYC'],
'rateType': ['Meeting Forward'],
'startingDate': [dt.date(2019, 10, 30)],
'endingDate': [dt.date(2019, 12, 18)],
'meetingNumber': [0],
'valuationDate': [dt.date(2019, 12, 6)],
'meetingDate': [dt.date(2019, 10, 24)],
'value': [-0.004522570525]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_meeting_absolute():
data_dict = MarketDataResponseFrame({'date': [datetime.date(2019, 12, 6), datetime.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2', 'MARFAGXDQRWM07Y2'],
'location': ['NYC', 'NYC'],
'rateType': ['Meeting Forward', 'Meeting Forward'],
'startingDate': [datetime.date(2019, 10, 30), datetime.date(2020, 1, 29)],
'endingDate': [datetime.date(2019, 10, 30), datetime.date(2020, 1, 29)],
'meetingNumber': [0, 2],
'valuationDate': [datetime.date(2019, 12, 6), datetime.date(2019, 12, 6)],
'meetingDate': [datetime.date(2019, 10, 24), datetime.date(2020, 1, 23)],
'value': [-0.004522570525, -0.004550907771]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_ois_spot():
data_dict = MarketDataResponseFrame({'date': [datetime.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2'],
'location': ['NYC'],
'rateType': ['Spot'],
'startingDate': [datetime.date(2019, 12, 6)],
'endingDate': [datetime.date(2019, 12, 7)],
'meetingNumber': [-1],
'valuationDate': [datetime.date(2019, 12, 6)],
'meetingDate': [datetime.date(2019, 12, 6)],
'value': [-0.00455]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_esg(_cls, _q):
d = {
"esNumericScore": [2, 4, 6],
"esNumericPercentile": [81.2, 75.4, 65.7],
"esPolicyScore": [2, 4, 6],
"esPolicyPercentile": [81.2, 75.4, 65.7],
"esScore": [2, 4, 6],
"esPercentile": [81.2, 75.4, 65.7],
"esProductImpactScore": [2, 4, 6],
"esProductImpactPercentile": [81.2, 75.4, 65.7],
"gScore": [2, 4, 6],
"gPercentile": [81.2, 75.4, 65.7],
"esMomentumScore": [2, 4, 6],
"esMomentumPercentile": [81.2, 75.4, 65.7],
"gRegionalScore": [2, 4, 6],
"gRegionalPercentile": [81.2, 75.4, 65.7],
"controversyScore": [2, 4, 6],
"controversyPercentile": [81.2, 75.4, 65.7],
"esDisclosurePercentage": [49.2, 55.7, 98.4]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_index_positions_data(
asset_id,
start_date,
end_date,
fields=None,
position_type=None
):
return [
{'underlyingAssetId': 'MA3',
'netWeight': 0.1,
'positionType': 'close',
'assetId': 'MA890',
'positionDate': '2020-01-01'
},
{'underlyingAssetId': 'MA1',
'netWeight': 0.6,
'positionType': 'close',
'assetId': 'MA890',
'positionDate': '2020-01-01'
},
{'underlyingAssetId': 'MA2',
'netWeight': 0.3,
'positionType': 'close',
'assetId': 'MA890',
'positionDate': '2020-01-01'
}
]
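# Fake index constituents (MA1/MA2/MA3 with close weights 0.6/0.3/0.1) used by the
# average implied volatility and average realized volatility tests.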
def mock_rating(_cls, _q):
d = {
'rating': ['Buy', 'Sell', 'Buy', 'Neutral'],
'convictionList': [1, 0, 0, 0]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2020, 8, 13), datetime.date(2020, 8, 14),
datetime.date(2020, 8, 17), datetime.date(2020, 8, 18)]))
df.dataset_ids = _test_datasets
return df
def mock_gsdeer_gsfeer(_cls, assetId, start_date):
d = {
'gsdeer': [1, 1.2, 1.1],
'gsfeer': [2, 1.8, 1.9],
'year': [2000, 2010, 2020],
'quarter': ['Q1', 'Q2', 'Q3']
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
return df
def mock_factor_profile(_cls, _q):
d = {
'growthScore': [0.238, 0.234, 0.234, 0.230],
'financialReturnsScore': [0.982, 0.982, 0.982, 0.982],
'multipleScore': [0.204, 0.192, 0.190, 0.190],
'integratedScore': [0.672, 0.676, 0.676, 0.674]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2020, 8, 13), datetime.date(2020, 8, 14),
datetime.date(2020, 8, 17), datetime.date(2020, 8, 18)]))
df.dataset_ids = _test_datasets
return df
def mock_commodity_forecast(_cls, _q):
d = {
'forecastPeriod': ['3m', '3m', '3m', '3m'],
'forecastType': ['spotReturn', 'spotReturn', 'spotReturn', 'spotReturn'],
'commodityForecast': [1700, 1400, 1500, 1600]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2020, 8, 13), datetime.date(2020, 8, 14),
datetime.date(2020, 8, 17), datetime.date(2020, 8, 18)]))
df.dataset_ids = _test_datasets
return df
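# Measure tests: each replaces GsDataApi.get_market_data with one of the mockers
# above and checks the returned series and, in most cases, its dataset_ids.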
def test_skew():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.skew(mock_spx, '1m', tm.SkewReference.DELTA, 25)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_norm)
actual = tm.skew(mock_spx, '1m', tm.SkewReference.NORMALIZED, 4)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_spot)
actual = tm.skew(mock_spx, '1m', tm.SkewReference.SPOT, 25)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
mock.return_value = mock_empty_market_data_response()
actual = tm.skew(mock_spx, '1m', tm.SkewReference.SPOT, 25)
assert actual.empty
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_inc)
with pytest.raises(MqError):
tm.skew(mock_spx, '1m', tm.SkewReference.DELTA, 25)
replace.restore()
with pytest.raises(MqError):
tm.skew(mock_spx, '1m', None, 25)
def test_skew_fx():
replace = Replacer()
cross = Cross('MAA0NE9QX2ABETG6', 'USD/EUR')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='EURUSD', ))]
replace('gs_quant.markets.securities.SecurityMaster.get_asset', Mock()).return_value = cross
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_delta)
mock = cross
actual = tm.skew(mock, '1m', tm.SkewReference.DELTA, 25)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.DELTA, 25, real_time=True)
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.SPOT, 25)
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.FORWARD, 25)
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.NORMALIZED, 25)
with pytest.raises(MqError):
tm.skew(mock, '1m', None, 25)
replace.restore()
def test_implied_vol():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_vol)
idx = pd.date_range(end=datetime.datetime.now(pytz.UTC).date(), periods=4, freq='D')
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2, 3], index=idx, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2, 3], index=idx, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(MqError):
tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_NEUTRAL)
with pytest.raises(MqError):
tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL)
replace.restore()
def test_implied_vol_no_last():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
idx = pd.date_range(end=datetime.date.today() - datetime.timedelta(days=1), periods=3, freq='D')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_vol_last_err)
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_vol_last_empty)
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
replace.restore()
def test_implied_vol_fx():
replace = Replacer()
mock = Cross('MAA0NE9QX2ABETG6', 'USD/EUR')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='EURUSD', ))]
replace('gs_quant.markets.securities.SecurityMaster.get_asset', Mock()).return_value = mock
# for different delta strikes
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_vol)
actual = tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_CALL, 25)
expected = pd.Series([5, 1, 2, 3], index=pd.date_range('2019-01-01', periods=4, freq='D'), name='impliedVolatility')
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_PUT, 25)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_NEUTRAL)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.FORWARD, 100)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.SPOT, 100)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
    # unsupported requests: missing relative strike, NORMALIZED reference, and SPOT/FORWARD strikes other than 100
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_CALL)
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.NORMALIZED, 25)
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.SPOT, 25)
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.FORWARD, 25)
replace.restore()
def test_fx_forecast():
replace = Replacer()
mock = Cross('MAA0NE9QX2ABETG6', 'USD/EUR')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='EURUSD', ))]
replace('gs_quant.markets.securities.SecurityMaster.get_asset', Mock()).return_value = mock
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_forecast)
actual = tm.fx_forecast(mock, '12m')
assert_series_equal(pd.Series([1.1, 1.1, 1.1], index=_index * 3, name='fxForecast'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.fx_forecast(mock, '3m')
assert_series_equal(pd.Series([1.1, 1.1, 1.1], index=_index * 3, name='fxForecast'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.fx_forecast(mock, '3m', real_time=True)
replace.restore()
def test_fx_forecast_inverse():
replace = Replacer()
get_cross = replace('gs_quant.timeseries.measures.cross_to_usd_based_cross', Mock())
get_cross.return_value = "MATGYV0J9MPX534Z"
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_forecast)
mock = Cross("MAYJPCVVF2RWXCES", 'USD/JPY')
actual = tm.fx_forecast(mock, '3m')
assert_series_equal(pd.Series([1 / 1.1, 1 / 1.1, 1 / 1.1], index=_index * 3, name='fxForecast'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace.restore()
def test_vol_smile():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.vol_smile(mock_spx, '1m', tm.VolSmileReference.FORWARD, '5d')
assert_series_equal(pd.Series([5, 1, 2], index=[0.75, 0.25, 0.5]), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.vol_smile(mock_spx, '1m', tm.VolSmileReference.SPOT, '5d')
assert_series_equal(pd.Series([5, 1, 2], index=[0.75, 0.25, 0.5]), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
actual = tm.vol_smile(mock_spx, '1m', tm.VolSmileReference.SPOT, '1d')
assert actual.empty
assert actual.dataset_ids == ()
market_mock.assert_called_once()
with pytest.raises(NotImplementedError):
tm.vol_smile(mock_spx, '1m', tm.VolSmileReference.SPOT, '1d', real_time=True)
replace.restore()
def test_impl_corr():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.implied_correlation(mock_spx, '1m', tm.EdrDataReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='impliedCorrelation'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_correlation(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='impliedCorrelation'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.implied_correlation(..., '1m', tm.EdrDataReference.DELTA_PUT, 75, real_time=True)
with pytest.raises(MqError):
tm.implied_correlation(..., '1m', tm.EdrDataReference.DELTA_CALL, 50, '')
replace.restore()
def test_impl_corr_n():
spx = Index('MA4B66MW5E27U8P32SB', AssetClass.Equity, 'SPX')
with pytest.raises(MqValueError):
tm.implied_correlation(spx, '1m', tm.EdrDataReference.DELTA_CALL, 0.5,
composition_date=datetime.date.today())
with pytest.raises(MqValueError):
tm.implied_correlation(spx, '1m', tm.EdrDataReference.DELTA_CALL, 0.5, 200)
resources = os.path.join(os.path.dirname(__file__), '..', 'resources')
i_vol = pd.read_csv(os.path.join(resources, 'SPX_50_icorr_in.csv'))
i_vol.index = pd.to_datetime(i_vol['date'])
weights = pd.read_csv(os.path.join(resources, 'SPX_50_weights.csv'))
weights.set_index('underlyingAssetId', inplace=True)
replace = Replacer()
market_data = replace('gs_quant.timeseries.econometrics.GsDataApi.get_market_data', Mock())
market_data.return_value = i_vol
constituents = replace('gs_quant.timeseries.measures._get_index_constituent_weights', Mock())
constituents.return_value = weights
expected = pd.read_csv(os.path.join(resources, 'SPX_50_icorr_out.csv'))
expected.index = pd.to_datetime(expected['date'])
expected = expected['value']
actual = tm.implied_correlation(spx, '1m', tm.EdrDataReference.DELTA_CALL, 0.5, 50, datetime.date(2020, 8, 31),
source='PlotTool')
pd.testing.assert_series_equal(actual, expected, check_names=False)
replace.restore()
def test_real_corr():
spx = Index('MA4B66MW5E27U8P32SB', AssetClass.Equity, 'SPX')
with pytest.raises(NotImplementedError):
tm.realized_correlation(spx, '1m', real_time=True)
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.realized_correlation(spx, '1m')
assert_series_equal(pd.Series([3.14, 2.71828, 1.44], index=_index * 3), pd.Series(actual), check_names=False)
assert actual.dataset_ids == _test_datasets
replace.restore()
def test_real_corr_missing():
spx = Index('MA4B66MW5E27U8P32SB', AssetClass.Equity, 'SPX')
d = {
'assetId': ['MA4B66MW5E27U8P32SB'] * 3,
'spot': [3000, 3100, 3050],
}
df = MarketDataResponseFrame(data=d, index=pd.date_range('2020-08-01', periods=3, freq='D'))
resources = os.path.join(os.path.dirname(__file__), '..', 'resources')
weights = pd.read_csv(os.path.join(resources, 'SPX_50_weights.csv'))
weights.set_index('underlyingAssetId', inplace=True)
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', lambda *args, **kwargs: df)
constituents = replace('gs_quant.timeseries.measures._get_index_constituent_weights', Mock())
constituents.return_value = weights
with pytest.raises(MqValueError):
tm.realized_correlation(spx, '1m', 50)
replace.restore()
def test_real_corr_n():
spx = Index('MA4B66MW5E27U8P32SB', AssetClass.Equity, 'SPX')
with pytest.raises(MqValueError):
tm.realized_correlation(spx, '1m', composition_date=datetime.date.today())
with pytest.raises(MqValueError):
tm.realized_correlation(spx, '1m', 200)
resources = os.path.join(os.path.dirname(__file__), '..', 'resources')
r_vol = pd.read_csv(os.path.join(resources, 'SPX_50_rcorr_in.csv'))
r_vol.index = pd.to_datetime(r_vol['date'])
weights = pd.read_csv(os.path.join(resources, 'SPX_50_weights.csv'))
weights.set_index('underlyingAssetId', inplace=True)
replace = Replacer()
market_data = replace('gs_quant.timeseries.econometrics.GsDataApi.get_market_data', Mock())
market_data.return_value = r_vol
constituents = replace('gs_quant.timeseries.measures._get_index_constituent_weights', Mock())
constituents.return_value = weights
expected = pd.read_csv(os.path.join(resources, 'SPX_50_rcorr_out.csv'))
expected.index = pd.to_datetime(expected['date'])
expected = expected['value']
actual = tm.realized_correlation(spx, '1m', 50, datetime.date(2020, 8, 31), source='PlotTool')
pd.testing.assert_series_equal(actual, expected, check_names=False)
replace.restore()
def test_cds_implied_vol():
replace = Replacer()
mock_cds = Index('MA890', AssetClass.Equity, 'CDS')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.cds_implied_volatility(mock_cds, '1m', '5y', tm.CdsVolReference.DELTA_CALL, 10)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='impliedVolatilityByDeltaStrike'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.cds_implied_volatility(mock_cds, '1m', '5y', tm.CdsVolReference.FORWARD, 100)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='impliedVolatilityByDeltaStrike'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.cds_implied_volatility(..., '1m', '5y', tm.CdsVolReference.DELTA_PUT, 75, real_time=True)
replace.restore()
def test_avg_impl_vol():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageImpliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageImpliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace.restore()
df1 = pd.DataFrame(data={'impliedVolatility': [1, 2, 3], 'assetId': ['MA1', 'MA1', 'MA1']},
index=pd.date_range(start='2020-01-01', periods=3))
df2 = pd.DataFrame(data={'impliedVolatility': [2, 3, 4], 'assetId': ['MA2', 'MA2', 'MA2']},
index=pd.date_range(start='2020-01-01', periods=3))
df3 = pd.DataFrame(data={'impliedVolatility': [2, 5], 'assetId': ['MA3', 'MA3']},
index=pd.date_range(start='2020-01-01', periods=2))
replace('gs_quant.api.gs.assets.GsAssetApi.get_asset_positions_data', mock_index_positions_data)
market_data_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
mock_implied_vol = MarketDataResponseFrame(pd.concat([df1, df2, df3], join='inner'))
mock_implied_vol.dataset_ids = _test_datasets
market_data_mock.return_value = mock_implied_vol
actual = tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_CALL, 25, 3, '1d')
assert_series_equal(pd.Series([1.4, 2.6, 3.33333],
index=pd.date_range(start='2020-01-01', periods=3), name='averageImpliedVolatility'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.average_implied_volatility(..., '1m', tm.EdrDataReference.DELTA_PUT, 75, real_time=True)
with pytest.raises(MqValueError):
tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75, top_n_of_index=None,
composition_date='1d')
with pytest.raises(NotImplementedError):
tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75, top_n_of_index=101)
replace.restore()
def test_avg_realized_vol():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.average_realized_volatility(mock_spx, '1m')
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageRealizedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace.restore()
df1 = pd.DataFrame(data={'spot': [1, 2, 3], 'assetId': ['MA1', 'MA1', 'MA1']},
index=pd.date_range(start='2020-01-01', periods=3))
df2 = pd.DataFrame(data={'spot': [2, 3, 4], 'assetId': ['MA2', 'MA2', 'MA2']},
index=pd.date_range(start='2020-01-01', periods=3))
df3 = pd.DataFrame(data={'spot': [2, 5], 'assetId': ['MA3', 'MA3']},
index=pd.date_range(start='2020-01-01', periods=2))
mock_spot = MarketDataResponseFrame(pd.concat([df1, df2, df3], join='inner'))
mock_spot.dataset_ids = _test_datasets
replace('gs_quant.api.gs.assets.GsAssetApi.get_asset_positions_data', mock_index_positions_data)
market_data_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_data_mock.return_value = mock_spot
actual = tm.average_realized_volatility(mock_spx, '2d', Returns.SIMPLE, 3, '1d')
assert_series_equal(pd.Series([392.874026], index=pd.date_range(start='2020-01-03', periods=1),
name='averageRealizedVolatility'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.average_realized_volatility(mock_spx, '1w', real_time=True)
with pytest.raises(MqValueError):
tm.average_realized_volatility(mock_spx, '1w', composition_date='1d')
with pytest.raises(NotImplementedError):
tm.average_realized_volatility(mock_spx, '1w', Returns.LOGARITHMIC)
with pytest.raises(NotImplementedError):
tm.average_realized_volatility(mock_spx, '1w', Returns.SIMPLE, 201)
replace.restore()
empty_positions_data_mock = replace('gs_quant.api.gs.assets.GsAssetApi.get_asset_positions_data', Mock())
empty_positions_data_mock.return_value = []
with pytest.raises(MqValueError):
tm.average_realized_volatility(mock_spx, '1w', Returns.SIMPLE, 5)
replace.restore()
def test_avg_impl_var():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.average_implied_variance(mock_spx, '1m', tm.EdrDataReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageImpliedVariance'), pd.Series(actual))
actual = tm.average_implied_variance(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75)
assert actual.dataset_ids == _test_datasets
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageImpliedVariance'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.average_implied_variance(..., '1m', tm.EdrDataReference.DELTA_PUT, 75, real_time=True)
replace.restore()
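# Rates measure tests: invalid tenor or benchmark inputs are expected to raise,
# then the happy path runs against mocked TDAPI assets and market data.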
def test_basis_swap_spread(mocker):
replace = Replacer()
args = dict(swap_tenor='10y', spread_benchmark_type=None, spread_tenor=None,
reference_benchmark_type=None, reference_tenor=None, forward_tenor='0b', real_time=False)
mock_nok = Currency('MA891', 'NOK')
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'NOK'
args['asset'] = mock_nok
with pytest.raises(NotImplementedError):
tm_rates.basis_swap_spread(**args)
mock_usd = Currency('MAZ7RWC904JYHYPS', 'USD')
args['asset'] = mock_usd
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
with pytest.raises(NotImplementedError):
tm_rates.basis_swap_spread(..., '1y', real_time=True)
args['swap_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['swap_tenor'] = '6y'
args['spread_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['spread_tenor'] = '3m'
args['reference_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['reference_tenor'] = '6m'
args['forward_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['forward_tenor'] = None
args['spread_benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['spread_benchmark_type'] = BenchmarkType.LIBOR
args['reference_benchmark_type'] = 'libor_3m'
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['reference_benchmark_type'] = BenchmarkType.LIBOR
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers = replace('gs_quant.timeseries.measures_rates._get_tdapi_rates_assets', Mock())
identifiers.return_value = {'MAQB1PGEJFCET3GG'}
mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None))
actual = tm_rates.basis_swap_spread(**args)
expected = tm.ExtendedSeries([1, 2, 3], index=_index * 3, name='basisSwapRate')
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual)
assert actual.dataset_ids == expected.dataset_ids
args['reference_benchmark_type'] = BenchmarkType.SOFR
args['reference_tenor'] = '1y'
args['reference_benchmark_type'] = BenchmarkType.LIBOR
args['reference_tenor'] = '3m'
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers = replace('gs_quant.timeseries.measures_rates._get_tdapi_rates_assets', Mock())
identifiers.return_value = {'MA06ATQ9CM0DCZFC'}
mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None))
actual = tm_rates.basis_swap_spread(**args)
expected = tm.ExtendedSeries([1, 2, 3], index=_index * 3, name='basisSwapRate')
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual)
assert actual.dataset_ids == expected.dataset_ids
replace.restore()
def test_swap_rate(mocker):
replace = Replacer()
args = dict(swap_tenor='10y', benchmark_type=None, floating_rate_tenor=None, forward_tenor='0b', real_time=False)
mock_usd = Currency('MAZ7RWC904JYHYPS', 'USD')
args['asset'] = mock_usd
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
args['swap_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_rate(**args)
args['swap_tenor'] = '10y'
args['floating_rate_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_rate(**args)
args['floating_rate_tenor'] = '1y'
args['forward_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_rate(**args)
args['forward_tenor'] = None
args['benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.swap_rate(**args)
args['benchmark_type'] = 'sonia'
with pytest.raises(MqValueError):
tm_rates.swap_rate(**args)
args['benchmark_type'] = 'fed_funds'
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers = replace('gs_quant.timeseries.measures_rates._get_tdapi_rates_assets', Mock())
identifiers.return_value = {'MAZ7RWC904JYHYPS'}
mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None))
actual = tm_rates.swap_rate(**args)
expected = tm.ExtendedSeries([1, 2, 3], index=_index * 3, name='swapRate')
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual)
assert actual.dataset_ids == _test_datasets
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'EUR'
identifiers = replace('gs_quant.timeseries.measures_rates._get_tdapi_rates_assets', Mock())
identifiers.return_value = {'MAJNQPFGN1EBDHAE'}
mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None))
args['asset'] = Currency('MAJNQPFGN1EBDHAE', 'EUR')
args['benchmark_type'] = 'estr'
actual = tm_rates.swap_rate(**args)
expected = tm.ExtendedSeries([1, 2, 3], index=_index * 3, name='swapRate')
expected.dataset_ids = _test_datasets
| assert_series_equal(expected, actual) | pandas.testing.assert_series_equal |
import argparse
import os
import shutil
import subprocess
from datetime import datetime
from pathlib import Path
import pandas as pd
import numpy as np
from sklearn.manifold import TSNE
from sklearn.cluster import KMeans
from mtc.challenge_pipelines.preprocess_data import generate_preprocessed_data
from mtc.settings import NLP_EXPERIMENT_PATH, NLP_RAW_DATA
from mtc.core.embeddings import DocumentEmbeddings
from mtc.core.sentence import Sentence
root_path = Path(NLP_RAW_DATA) / 'n2c2'
train_name = 'clinicalSTS2019.train.txt'
test_name = 'clinicalSTS2019.test.txt'
test_labels_name = 'clinicalSTS2019.test.gs.sim.txt'
def prepare_input_folder_official(folder):
folder = folder / 'n2c2'
folder.mkdir(parents=True, exist_ok=True)
# Just copy the original data
shutil.copy2(root_path / train_name, folder / train_name)
shutil.copy2(root_path / test_name, folder / test_name)
shutil.copy2(root_path / test_labels_name, folder / test_labels_name)
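# The 'complete' variant below merges train and test (with the gold similarity
# labels) into a single shuffled file that is then used as the training data.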
def prepare_input_folder_complete(folder):
folder = folder / 'n2c2'
folder.mkdir(parents=True, exist_ok=True)
# Load raw data
df_train = pd.read_csv(root_path / train_name, sep='\t', header=None)
df_test = pd.read_csv(root_path / test_name, sep='\t', header=None)
df_test[2] = pd.read_csv(root_path / 'clinicalSTS2019.test.gs.sim.txt', header=None)
# Create a combined dataframe
df = pd.concat([df_train, df_test])
# Shuffle the rows
df = df.sample(frac=1, random_state=1337).reset_index(drop=True)
# Use combined file as new training data
df.to_csv(folder / train_name, sep='\t', header=None, index=None)
    # Check whether the new file has the correct format
df_train = pd.read_csv(folder / train_name, sep='\t', header=None)
assert df_train.shape == (2054, 3)
assert pd.api.types.is_object_dtype(df.dtypes[0])
assert pd.api.types.is_object_dtype(df.dtypes[1])
assert | pd.api.types.is_float_dtype(df.dtypes[2]) | pandas.api.types.is_float_dtype |
from typing import *
import numpy as np
import argparse
from toolz.itertoolz import get
import zarr
import re
import sys
import logging
import pickle
import pandas as pd
from sympy import Point, Line
from skimage import feature, measure, morphology, img_as_float
from skimage.filters import rank_order
from scipy import ndimage as nd
from pathlib import Path
from pysmFISH.utils import convert_from_uint16_to_float64
from pysmFISH.data_models import Output_models
from pysmFISH.logger_utils import selected_logger
class osmFISH_dots_thr_selection():
"""
Class used to automatically define the threshold used to call the
    signal peaks. The threshold is calculated without masking large objects
    or contamination.
This is the original class used in the osmFISH paper.
"""
def __init__(self, img:np.ndarray, parameters_dict:Dict, min_int:float=False, max_int:float=False,min_peaks:int=False):
"""Initialize the class
Args:
img (np.ndarray): Image to process
parameters_dict (Dict): Parameters used to define the peaks.
min_int (float, optional): Minimum intensity value to use for the binning of
the signal intensities. Defaults to False.
max_int (float, optional): Maximum intensity value to use for the binning of
the signal intensities. Defaults to False.
min_peaks (int, optional): Minimum number of peaks required for the
calculation of the counting threshold. Defaults to False.
"""
self.img = img
self.parameters_dict = parameters_dict
self.min_int = min_int
self.max_int = max_int
self.min_peaks = min_peaks
if self.min_peaks == False:
self.min_peaks = 3
self.min_distance = self.parameters_dict['min_distance']
self.fill_value = np.nan
# List with the total peaks calculated for each threshold
self.total_peaks = []
self.thr_used = []
def counting_graph(self):
"""Function used for the construction of the number of peaks(Y) / thresholds(X)
graph used to define the threshold.
"""
binning = 100
# Define the range of thr to be tested
if self.img.max() == 0:
self.thr_array = []
else:
if self.min_int and self.max_int:
self.thr_array = np.linspace(self.min_int,self.max_int,num=binning)
elif self.min_int:
self.thr_array = np.linspace(self.min_int,self.img.max(),num=binning)
elif self.max_int:
self.thr_array = np.linspace(np.min(self.img[np.nonzero(self.img)]),self.max_int,num=binning)
else:
self.thr_array = np.linspace(np.min(self.img[np.nonzero(self.img)]),self.img.max(),num=binning)
# Calculate the number of peaks for each threshold. In this calculation
# the size of the objects is not considered
self.peak_counter_min = 0
self.peak_counter_max = 0
for vl, thr in enumerate(self.thr_array):
# The border is excluded from the counting
self.peaks = feature.peak_local_max(self.img,min_distance=self.min_distance,\
threshold_abs=thr,exclude_border=False, indices=True,\
num_peaks=np.inf, footprint=None,labels=None)
self.number_peaks = len(self.peaks)
            # Stop the counting when the number of detected peaks falls to min_peaks or below
if self.number_peaks<=self.min_peaks:
self.stop_thr = thr # Move in the upper loop so you will stop at the previous thr
break
else:
self.total_peaks.append(len(self.peaks))
self.thr_used.append(thr)
def thr_identification(self):
"""Function that use the number of peaks / thresholds graph to define the threshold
to used for the counting.
- calculate the gradient of the number of peaks / threshold function
- remove the initial minimum point
- calculate the segment that join the extremities of the gradient. This version
of the code uses sympy.
- Calculate the thr corresponding to the point of max distance from the segment
"""
        # Consider the case of no detected peaks, or only one thr that creates
        # peaks (the total_peaks list has only one element)
# if np.array(total_peaks).sum()>0 or len(total_peaks)>1:
if len(self.total_peaks)>1:
# Trim the threshold array in order to match the stopping point
# used the [0][0] to get the first number and then take it out from list
# thr_array = thr_array[:np.where(thr_array==stop_thr)[0][0]]
self.thr_array = np.array(self.thr_used)
# Calculate the gradient of the number of peaks distribution
grad = np.gradient(self.total_peaks)
# Restructure the data in order to avoid to consider the min_peak in the
# calculations
# Coord of the gradient min_peak
grad_min_peak_coord = np.argmin(grad)
# Trim the data to remove the peak.
self.trimmed_thr_array = self.thr_array[grad_min_peak_coord:]
self.trimmed_grad = grad[grad_min_peak_coord:]
if self.trimmed_thr_array.shape>(1,):
# Trim the coords array in order to maintain the same length of the
# tr and pk
self.trimmed_total_peaks = self.total_peaks[grad_min_peak_coord:]
# To determine the threshold we will determine the Thr with the biggest
# distance to the segment that join the end points of the calculated
# gradient
# Distances list
distances = []
# Calculate the coords of the end points of the gradient
p1 = Point(self.trimmed_thr_array[0],self.trimmed_grad[0])
p2 = Point(self.trimmed_thr_array[-1],self.trimmed_grad[-1])
# Create a line that join the points
s = Line(p1,p2)
allpoints = np.arange(0,len(self.trimmed_thr_array))
# Calculate the distance between all points and the line
for p in allpoints:
dst = s.distance(Point(self.trimmed_thr_array[p],self.trimmed_grad[p]))
distances.append(dst.evalf())
# Remove the end points from the lists
self.trimmed_thr_array = self.trimmed_thr_array[1:-1]
self.trimmed_grad = self.trimmed_grad[1:-1]
self.trimmed_total_peaks = self.trimmed_total_peaks[1:-1]
self.trimmed_distances = distances[1:-1]
# Determine the coords of the selected Thr
# Converted trimmed_distances to array because it crashed
# on Sanger.
if self.trimmed_distances: # Most efficient way will be to consider the length of Thr list
thr_idx = np.argmax(np.array(self.trimmed_distances))
self.selected_thr = self.trimmed_thr_array[thr_idx]
# The selected threshold usually causes oversampling of the number of dots
# I added a stringency parameter (int n) to use to select the Thr+n
# for the counting. It selects a stringency only if the trimmed_thr_array
# is long enough. Also consider the case in which the stringency in negative
else:
self.selected_thr = self.fill_value
self.trimmed_thr_array = self.fill_value
else:
self.selected_thr = self.fill_value
self.trimmed_thr_array = self.fill_value
else:
self.selected_thr = self.fill_value
self.trimmed_thr_array = self.fill_value
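# Hedged usage sketch (not part of the original pipeline): it shows how the
# threshold-selection class above is typically driven. `img` is assumed to be
# a 2D numpy array of a preprocessed smFISH channel and the parameter value
# is illustrative only.
def _example_thr_selection(img: np.ndarray) -> float:
    params = {'min_distance': 5}
    thr_sel = osmFISH_dots_thr_selection(img, params, min_peaks=3)
    thr_sel.counting_graph()
    thr_sel.thr_identification()
    # selected_thr is np.nan when no usable threshold could be determined
    return thr_sel.selected_thr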
class osmFISH_dots_mapping():
"""Function used to count the peaks after identification of the threshold
and masking of large objects.
This is the original class used in the osmFISH paper.
"""
def __init__(self,img: np.ndarray,thr: float,parameters_dict: dict):
"""Class initialization
Args:
img (np.ndarray): Image to process
thr (float): Precalculate threshold for masking the image
parameters_dict (dict): Parameters used to define the peaks.
"""
# Calculate the selected peaks after removal of the big and small objects
self.img = img
self.thr = thr
        # TODO: raise an error if the selected thr is < 0
self.parameters_dict = parameters_dict
self.min_distance = self.parameters_dict['min_distance']
self.min_obj_size = self.parameters_dict['min_obj_size']
self.max_obj_size = self.parameters_dict['max_obj_size']
self.num_peaks_per_label = self.parameters_dict['num_peaks_per_label']
self.fill_value = np.nan
# Threshold the image using the selected threshold
img_mask = self.img>self.thr
labels = nd.label(img_mask)[0]
properties = measure.regionprops(labels)
for ob in properties:
if ob.area<self.min_obj_size or ob.area>self.max_obj_size:
img_mask[ob.coords[:,0],ob.coords[:,1]]=0
labels = nd.label(img_mask)[0]
# Collect the properties of the labels after size selection
properties = measure.regionprops(labels,intensity_image=self.img)
self.selected_peaks = feature.peak_local_max(self.img, min_distance=self.min_distance,
threshold_abs=self.thr, exclude_border=True,
footprint=None, labels=labels,num_peaks_per_label=self.num_peaks_per_label)
# # calling peak_local_max without Labels argument
# selected_peaks_mask = feature.peak_local_max(self.img, min_distance=self.min_distance,
# threshold_abs=self.thr, exclude_border=True,
# footprint=None,num_peaks=np.inf,indices=False).astype(int)
# # instead, make sure the selected peaks does not meet zeros at labels (background)
# labels_mask = (labels > 0).astype(int)
# selected_peaks_mask = selected_peaks_mask * labels_mask
# self.selected_peaks = np.vstack(np.where(selected_peaks_mask)).T
if self.selected_peaks.size:
self.intensity_array = self.img[self.selected_peaks[:,0],self.selected_peaks[:,1]]
else:
self.intensity_array = np.nan
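# Hedged usage sketch (illustrative, not from the original pipeline): the
# mapping class above is constructed with an image, a pre-computed threshold
# (e.g. from osmFISH_dots_thr_selection) and size/distance parameters; the
# parameter values below are placeholders.
def _example_dots_mapping(img: np.ndarray, thr: float) -> np.ndarray:
    params = {'min_distance': 5, 'min_obj_size': 2,
              'max_obj_size': 200, 'num_peaks_per_label': 1}
    mapping = osmFISH_dots_mapping(img, thr, params)
    # (row, col) coordinates of the peaks retained after size filtering
    return mapping.selected_peaks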
def peak_thrs_local_max_fast(image: np.ndarray, min_distance: int=1,
threshold_abs: float=None,threshold_rel:float=None,
exclude_border: int=True, indices: bool=True,
num_peaks: int=np.inf, footprint: np.ndarray=None,
labels: np.ndarray=None)->np.ndarray:
"""Function after modification:
returns the coordinates for a range of thresholds
Peaks are the local maxima in a region of `2 * min_distance + 1`
(i.e. peaks are separated by at least `min_distance`).
If peaks are flat (i.e. multiple adjacent pixels have identical
intensities), the coordinates of all such pixels are returned.
If both `threshold_abs` and `threshold_rel` are provided, the maximum
of the two is chosen as the minimum intensity threshold of peaks.
Notes
-----
The peak local maximum function returns the coordinates of local peaks
(maxima) in an image. A maximum filter is used for finding local maxima.
This operation dilates the original image. After comparison of the dilated
and original image, this function returns the coordinates or a mask of the
peaks where the dilated image equals the original image.
Examples
--------
>>> img1 = np.zeros((7, 7))
>>> img1[3, 4] = 1
>>> img1[3, 2] = 1.5
>>> img1
array([[ 0. , 0. , 0. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 1.5, 0. , 1. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. , 0. ]])
>>> peak_local_max(img1, min_distance=1)
array([[3, 2],
[3, 4]])
>>> peak_local_max(img1, min_distance=2)
array([[3, 2]])
>>> img2 = np.zeros((20, 20, 20))
>>> img2[10, 10, 10] = 1
>>> peak_local_max(img2, exclude_border=0)
array([[10, 10, 10]])
Args:
image (np.ndarray): Input image.
min_distance (int, optional): Minimum number of pixels separating peaks in a region of `2 *
min_distance + 1` (i.e. peaks are separated by at least
`min_distance`). Defaults to 1.
threshold_abs (float, optional): Minimum intensity of peaks. By default, the absolute threshold is
the minimum intensity of the image. Defaults to None.
threshold_rel (float, optional): Minimum intensity of peaks, calculated as `max(image) * threshold_rel`.
Defaults to None.
exclude_border (int, optional): If nonzero, `exclude_border` excludes peaks from
            within `exclude_border`-pixels of the border of the image. Defaults to True.
indices (bool, optional): If True, the output will be an array representing peak
coordinates. If False, the output will be a boolean array shaped as
            `image.shape` with peaks present at True elements. Defaults to True.
num_peaks (int, optional): Maximum number of peaks. When the number of peaks exceeds `num_peaks`,
            return `num_peaks` peaks based on highest peak intensity. Defaults to np.inf.
footprint (np.ndarray, optional): If provided, `footprint == 1` represents the local region within which
to search for peaks at every point in `image`. Overrides
            `min_distance` (also for `exclude_border`). Defaults to None.
labels (np.ndarray, optional): If provided, each unique region `labels == value` represents a unique
            region to search for peaks. Zero is reserved for background. Defaults to None.
Returns:
np.ndarray: If `indices = True` : (row, column, ...) coordinates of peaks.
If `indices = False` : Boolean array shaped like `image`, with peaks
represented by True values.
"""
if type(exclude_border) == bool:
exclude_border = min_distance if exclude_border else 0
    out = np.zeros_like(image, dtype=bool)
# In the case of labels, recursively build and return an output
# operating on each label separately
if labels is not None:
label_values = np.unique(labels)
# Reorder label values to have consecutive integers (no gaps)
if np.any(np.diff(label_values) != 1):
mask = labels >= 1
labels[mask] = 1 + rank_order(labels[mask])[0].astype(labels.dtype)
labels = labels.astype(np.int32)
# New values for new ordering
label_values = np.unique(labels)
for label in label_values[label_values != 0]:
maskim = (labels == label)
out += feature.peak_local_max(image * maskim, min_distance=min_distance,
threshold_abs=threshold_abs,
threshold_rel=threshold_rel,
exclude_border=exclude_border,
indices=False, num_peaks=np.inf,
footprint=footprint, labels=None)
if indices is True:
return np.transpose(out.nonzero())
else:
            return out.astype(bool)
if np.all(image == image.flat[0]):
if indices is True:
            return np.empty((0, 2), int)
else:
return out
# Non maximum filter
if footprint is not None:
image_max = nd.maximum_filter(image, footprint=footprint,
mode='constant')
else:
size = 2 * min_distance + 1
image_max = nd.maximum_filter(image, size=size, mode='constant')
mask = image == image_max
if exclude_border:
# zero out the image borders
for i in range(mask.ndim):
mask = mask.swapaxes(0, i)
remove = (footprint.shape[i] if footprint is not None
else 2 * exclude_border)
mask[:remove // 2] = mask[-remove // 2:] = False
mask = mask.swapaxes(0, i)
# find top peak candidates above a threshold
thresholds = []
if threshold_abs is None:
threshold_abs = image.min()
thresholds.append(threshold_abs)
if threshold_rel is not None:
thresholds.append(threshold_rel * image.max())
if thresholds:
mask_original = mask # save the local maxima's of the image
thrs_coords = {} # dictionary holds the coordinates correspond for each threshold
for threshold in thresholds[0]:
mask = mask_original
mask &= image > threshold
# get coordinates of peaks
coordinates = np.transpose(mask.nonzero())
if coordinates.shape[0] > num_peaks:
intensities = image.flat[np.ravel_multi_index(coordinates.transpose(),
image.shape)]
idx_maxsort = np.argsort(intensities)[::-1]
coordinates = coordinates[idx_maxsort][:num_peaks]
if indices is True:
thrs_coords[threshold] = coordinates
else:
nd_indices = tuple(coordinates.T)
out[nd_indices] = True
return out
if thresholds and thrs_coords:
return thrs_coords
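# Hedged usage sketch for the multi-threshold variant above: when
# `threshold_abs` is an array and `indices` is True, the function returns a
# dict mapping each threshold to the peak coordinates detected at that
# threshold (the same way it is used in the detection function below). The
# number of bins is illustrative only.
def _example_multi_threshold_peaks(img: np.ndarray) -> Dict:
    thr_array = np.linspace(np.min(img[np.nonzero(img)]), img.max(), num=10)
    return peak_thrs_local_max_fast(img, min_distance=5,
                                    threshold_abs=thr_array,
                                    exclude_border=True, indices=True)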
def osmFISH_peak_based_detection_fast(ImgStack: np.ndarray,
fov_subdataset: pd.Series,
parameters_dict: dict,
dimensions: int=2,
stringency:int =0,
min_int:float=False,
max_int:float=False,
min_peaks:int=False)->pd.DataFrame:
"""This function is used to calculate the threshold to use for the dots
counting in a 3D image. It is based on the function used in the osmFISH
    paper but doesn't require sympy. It integrates the peak_thrs_local_max_fast
and the calculation of the peaks on the masked image.
Args:
ImgStack (np.ndarray): preprocessed image used to count the dots
fov_subdataset (pd.Series): Series with the metadata info relative to the image to process
parameters_dict (dict): Parameters used to define the peaks.
dimensions (int, optional): Image dimension (2 for 2D or 3 for 3D). Defaults to 2.
stringency (int, optional): Select a thr before or after the one calculated
automatically. Defaults to 0.
min_int (float, optional): Minimum intensity value to use for the binning of
the signal intensities. Defaults to False.
max_int (float, optional): Maximum intensity value to use for the binning of
the signal intensities. Defaults to False.
min_peaks (int, optional): Minimum number of peaks required for the
calculation of the counting threshold. Defaults to False.
Returns:
pd.DataFrame: counts data
"""
logger = selected_logger()
if min_peaks == False:
min_peaks = 3
fill_value = np.nan
# List with the total peaks calculated for each threshold
thr_used = []
binning = 100
# Define the range of thr to be tested
if min_int and max_int:
ThrArray = np.linspace(min_int,max_int,num=binning)
elif min_int:
ThrArray = np.linspace(min_int,ImgStack.max(),num=binning)
elif max_int:
ThrArray = np.linspace(np.min(ImgStack[np.nonzero(ImgStack)]),max_int,num=binning)
else:
ThrArray = np.linspace(np.min(ImgStack[np.nonzero(ImgStack)]),ImgStack.max(),num=binning)
fov = fov_subdataset.fov_num
round_num = fov_subdataset.round_num
channel = fov_subdataset.channel
target_name = fov_subdataset.target_name
fill_value = np.nan
data_models = Output_models()
counts_dict = data_models.dots_counts_dict
# Initialise an empty version of the counts dict
counts_dict['r_px_original'] = np.array([fill_value])
counts_dict['c_px_original'] = np.array([fill_value])
counts_dict['dot_id'] = np.array([fill_value])
counts_dict['dot_intensity'] = np.array([fill_value])
counts_dict['selected_thr'] = np.array([fill_value])
min_distance = parameters_dict['CountingFishMinObjDistance']
min_obj_size = parameters_dict['CountingFishMinObjSize']
max_obj_size = parameters_dict['CountingFishMaxObjSize']
#num_peaks_per_label = self.parameters_dict['num_peaks_per_label']
fov_subdataset_df = pd.DataFrame(fov_subdataset).T
# List of ndarrays with the coords of the peaks calculated for each threshold
PeaksCoords = []
Selected_Peaks2_mask = None
# Determine if working in 2D or 3D
if dimensions == 2:
if len(ImgStack.shape) > 2:
ImgStack = np.amax(ImgStack, axis=0)
# Define the Thr array
# ThrArray = np.linspace(ImgStack.min(), ImgStack.max(), num=binning)
# Calculate the number of peaks for each threshold
        # Exclude the border because of image-processing artefacts
thrs_peaks = peak_thrs_local_max_fast(ImgStack, min_distance=min_distance,
threshold_abs=ThrArray, exclude_border=True, indices=True,
num_peaks=np.inf, footprint=None, labels=None)
lists = sorted(thrs_peaks.items()) # sorted by key, return a list of tuples. tuple[0]: threshold, tuple[1]: coords
x, PeaksCoords = zip(*lists) # unpack a list of pairs into two tuples
TotalPeaks = []
for j in range(len(PeaksCoords)):
TotalPeaks += (len(PeaksCoords[j]),) # get number of peaks
# print("Thresholds distribution %.3f seconds" % (timings['thrs_dist']))
        # Consider the case of no detected peaks, or only one thr that creates
        # peaks (the TotalPeaks list has only one element)
# if np.array(TotalPeaks).sum()>0 or len(TotalPeaks)>1:
if len(TotalPeaks) > 3:
# Trim the threshold array in order to match the stopping point
# used the [0][0] to get the first number and then take it out from list
# ThrArray = ThrArray[:np.where(ThrArray == StopThr)[0][0]]
# Trim and convert to types as Simone's
TotalPeaks = np.array(TotalPeaks)
TotalPeaks = list(TotalPeaks[TotalPeaks > min_peaks])
ThrArray = ThrArray[:len(TotalPeaks)]
PeaksCoords = np.array(PeaksCoords)
PeaksCoords = PeaksCoords[:len(TotalPeaks)]
PeaksCoords = list(PeaksCoords)
if len(TotalPeaks) > 3:
# Calculate the gradient of the number of peaks distribution
# grad = np.gradient(TotalPeaks)
grad = np.gradient(TotalPeaks,edge_order=1)
# Restructure the data in order to avoid to consider the min_peak in the
# calculations
# Coord of the gradient min_peak
grad_min_peak_coord = np.argmin(grad)
# Trim the data to remove the peak.
trimmed_thr_array = ThrArray[grad_min_peak_coord:]
trimmed_grad = grad[grad_min_peak_coord:]
if trimmed_thr_array.shape > (1,):
# Trim the coords array in order to maintain the same length of the
# tr and pk
Trimmed_PeaksCoords = PeaksCoords[grad_min_peak_coord:]
trimmed_total_peaks = TotalPeaks[grad_min_peak_coord:]
# To determine the threshold we will determine the Thr with the biggest
# distance to the segment that join the end points of the calculated
# # gradient
# Calculate the coords of the end points of the gradient
p1 = np.array([trimmed_thr_array[0],trimmed_grad[0]])
p2 = np.array([trimmed_thr_array[-1],trimmed_grad[-1]])
# Create a line that join the points
allpoints = np.arange(0,len(trimmed_thr_array))
allpoints_coords = np.array([trimmed_thr_array[allpoints],trimmed_grad[allpoints]]).T
distances = []
for point in allpoints_coords:
distances.append(np.linalg.norm(np.cross(p2-p1, p1-point))/np.linalg.norm(p2-p1))
# Remove the end points from the lists
trimmed_thr_array = trimmed_thr_array[1:-1]
trimmed_grad = trimmed_grad[1:-1]
trimmed_total_peaks = trimmed_total_peaks[1:-1]
trimmed_distances = distances[1:-1]
# Determine the coords of the selected Thr
# Converted Trimmed_distances to array because it crashed
# on Sanger.
if trimmed_distances: # Most efficient way will be to consider the length of Thr list
Thr_idx = np.argmax(np.array(trimmed_distances))
Calculated_Thr = trimmed_thr_array[Thr_idx]
# The selected threshold usually causes oversampling of the number of dots
# I added a stringency parameter (int n) to use to select the Thr+n
# for the counting. It selects a stringency only if the Trimmed_ThrArray
# is long enough
if Thr_idx + stringency < len(trimmed_thr_array):
Selected_Thr = trimmed_thr_array[Thr_idx + stringency]
Selected_Peaks = Trimmed_PeaksCoords[Thr_idx + stringency]
else:
Selected_Thr = trimmed_thr_array[Thr_idx]
Selected_Peaks = Trimmed_PeaksCoords[Thr_idx]
# Calculate the selected peaks after removal of the big and small objects
# Threshold the image using the selected threshold
# if Selected_Thr > 0:
# ImgMask = ImgStack > Selected_Thr
ImgMask = ImgStack > Selected_Thr
Labels = nd.label(ImgMask)[0]
Properties = measure.regionprops(Labels)
for ob in Properties:
if ob.area < min_obj_size or ob.area > max_obj_size:
ImgMask[ob.coords[:, 0], ob.coords[:, 1]] = 0
Labels = nd.label(ImgMask)[0]
# # calling peak_local_max without Labels argument
# Selected_Peaks2_mask = feature.peak_local_max(ImgStack, min_distance=min_distance,
# threshold_abs=Selected_Thr, exclude_border=True, indices=False,
# footprint=None,num_peaks=np.inf).astype(int)
# # instead, make sure the selected peaks does not meet zeros at labels (background)
# Labels_mask = (Labels > 0).astype(int)
# Selected_Peaks2_mask = Selected_Peaks2_mask * Labels_mask
# Selected_Peaks2 = np.vstack(np.where(Selected_Peaks2_mask)).T
Selected_Peaks2 = feature.peak_local_max(ImgStack, min_distance=min_distance,
threshold_abs=Selected_Thr, exclude_border=True, indices=True,
footprint=None,labels=Labels,num_peaks=np.inf).astype(int)
if Selected_Peaks2.size:
# Intensity counting of the max peaks
# Selected_peaks_coords = np.where(Selected_Peaks2)
# Selected_peaks_int = ImgStack[Selected_peaks_coords[0], Selected_peaks_coords[1]]
Selected_peaks_int = ImgStack[Selected_Peaks2[:, 0], Selected_Peaks2[:, 1]]
# Peaks have been identified
total_dots = Selected_Peaks2.shape[0]
dot_id_array = np.array([str(fov)+'_'+str(round_num)+'_'+ channel +'_'+str(nid) for nid in range(total_dots)])
thr_array = np.repeat(Selected_Thr,total_dots)
channel_array = np.repeat(channel,total_dots)
counts_dict['r_px_original'] = Selected_Peaks2[:,0]
counts_dict['c_px_original'] = Selected_Peaks2[:,1]
counts_dict['dot_id'] = dot_id_array
counts_dict['dot_intensity'] = Selected_peaks_int
counts_dict['selected_thr'] = thr_array
else:
logger.info(f' fov {fov} does not have counts (mapping)')
else:
logger.info(f' fov {fov} Trimmed distance equal to zero')
else:
logger.info(f' fov {fov} calculated Thr array to small for selection of Thr')
else:
logger.info(f' fov {fov} does not have counts for calculating Thr')
else:
logger.info(f' fov {fov} does not have counts for calculating Thr')
    counts_df = pd.DataFrame(counts_dict)
#!/usr/bin/env python2
import numpy as np
import pandas as pd
import datetime as dt
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from os.path import expanduser
import scipy
def treegrass_frac(ndvi, day_rs):
"""
Process based on Donohue et al. (2009) to separate out tree and grass cover,
using moving windows (adapted here for daily time-step)
"""
# first calculate the 7-month moving minimum window across the time-series
# period = 7
fp1 = moving_something(np.min, ndvi, period=3, day_rs=day_rs)
# period = 9
fp2 = moving_something(lambda x: sum(x)/(9*day_rs), fp1, period=9, day_rs=day_rs)
fr1 = ndvi - fp2
ftree = [p2 - np.abs(r1) if r1 < 0 else p2 for p2, r1 in zip(fp2, fr1)]
fgrass = ndvi - ftree
return pd.DataFrame({'total':ndvi, 'tree':ftree, 'grass':fgrass})
def moving_something(_fun, tseries, period, day_rs=16, is_days=True):
"""
Applies a function to a moving window of the time-series:
ft_ = function([ f(t-N), f(t). f(t+N)])
"""
# if the time-series is at a day-time step, update the window to a step-size of 16 days
if is_days:
p0 = period*day_rs
else:
p0 = period
# find upper and lower bounds of the moving window
half = p0//2
tlen = len(tseries)
twin = [0]*tlen
for im in range(tlen):
        # compute the statistic over the window, handling the edge conditions
if im < half:
# fold back onto the end of the time-series
twin[im] = _fun(np.hstack([tseries[tlen-(half-im):tlen],\
tseries[0:im+half]]))
elif im > tlen-half:
# fold back into the beginning of the time-series
twin[im] = _fun(np.hstack([tseries[im-half:tlen],\
tseries[0:half-(tlen-im)]]))
else:
twin[im] = _fun(tseries[im-half:im+half])
return twin
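# Hedged usage sketch with synthetic data (the NDVI curve and day_rs value are
# illustrative only): treegrass_frac() takes an NDVI series plus the day_rs
# argument that is forwarded to the moving-window helper, and returns a
# DataFrame with 'total', 'tree' and 'grass' components.
def _example_treegrass_split():
    days = np.arange(0, 365)
    ndvi = 0.4 + 0.2 * np.sin(2 * np.pi * days / 365.0)
    return treegrass_frac(ndvi, day_rs=1)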
def import_one_year(file_name):
"""
Imports the one-year climatology, resetting time columns
as a multi-index pandas dataframe
"""
# universal time labels
time_label = ['Month', 'Day', 'Hour', 'Min']
# import data
    clim_raw = pd.read_csv(file_name)
# Copyright (C) 2016 The Regents of the University of Michigan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
# note: this file needs to be modified to work on remote data files; currently implemented only for reading/writing locally
'''
Takes gzipped Coursera clickstream/log files as input and returns a set of csvs into current working directory.
Each weekly csv is a list of users (rows), with columns corresponding to the features for that week.
Features come from Mi and Yeung (2015).
'''
import gzip, argparse, json, re, math, datetime, os, bisect
import pandas as pd
MILLISECONDS_IN_SECOND = 1000
def fetch_start_end_date(course_name, run, date_csv = "coursera_course_dates.csv"):
"""
    Fetch course start and end date (so the user doesn't have to specify them directly).
:param course_name: Short name of course.
:param run: run number
:param date_csv: Path to csv of course start/end dates.
:return: tuple of datetime objects (course_start, course_end)
"""
full_course_name = '{0}-{1}'.format(course_name, run)
date_df = pd.read_csv(date_csv, usecols=[0, 2, 3]).set_index('course')
course_start = datetime.datetime.strptime(date_df.loc[full_course_name].start_date, '%m/%d/%y')
course_end = datetime.datetime.strptime(date_df.loc[full_course_name].end_date, '%m/%d/%y')
return (course_start, course_end)
def course_len(course_start, course_end):
'''
Return the duration of a course, in number of whole weeks.
Note: Final week may be less than 7 days, depending on course start and end dates.
:param course_start: datetime object for first day of course (generated from user input)
:param course_end: datetime object for last day of course (generated from user input)
:return: integer of course duration in number of weeks (rounded up if necessary)
'''
course_start, course_end = course_start, course_end
n_days = (course_end - course_start).days
n_weeks = math.ceil(n_days / 7)
return n_weeks
def timestamp_week(timestamp, course_start, course_end):
'''
Get (zero-indexed) week number for a given timestamp.
:param timestamp: UTC timestamp, in seconds.
:param course_start: datetime object for first day of course (generated from user input)
:param course_end: datetime object for last day of course (generated from user input)
:return: integer week number of timestamp. If week not in range of course dates provided, return None.
'''
timestamp = datetime.datetime.fromtimestamp(timestamp / MILLISECONDS_IN_SECOND)
n_weeks = course_len(course_start, course_end)
week_starts = [course_start + datetime.timedelta(days=x)
for x in range(0, n_weeks * 7, 7)]
week_number = bisect.bisect_left(week_starts, timestamp) - 1
if week_number >= 0 and week_number <= n_weeks:
return week_number
else: # event is not within official course dates
return None
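# Hedged usage sketch (dates and timestamp are illustrative): course_len() and
# timestamp_week() are normally driven with the datetime objects produced by
# fetch_start_end_date(); note the timestamp is expected in milliseconds.
def _example_week_lookup():
    course_start = datetime.datetime(2015, 10, 1)
    course_end = datetime.datetime(2015, 12, 10)
    n_weeks = course_len(course_start, course_end)
    ts_ms = datetime.datetime(2015, 10, 20).timestamp() * MILLISECONDS_IN_SECOND
    return n_weeks, timestamp_week(ts_ms, course_start, course_end)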
def extract_users_dropouts(coursera_clickstream_file, course_start, course_end):
'''
Assemble list of all users, and dictionary of their dropout weeks.
:param coursera_clickstream_file: gzipped Coursera clickstream file; see ./sampledata for example
:param course_start: datetime object for first day of course (generated from user input)
:param course_end: datetime object for last day of course (generated from user input)
:return: tuple of (users, dropout_dict):
users: Python set of all unique user IDs that registered any activity in clickstream log
df_dropout: pd.DataFrame of userID, dropout_week for each user (dropout_week = 0 if no valid activity)
'''
users = set()
user_dropout_weeks = {}
linecount = 0
with(gzip.open(coursera_clickstream_file, 'r')) as f:
for line in f:
try:
log_entry = json.loads(line.decode("utf-8"))
user = log_entry.get('username')
timestamp = log_entry.get('timestamp', 0)
week = timestamp_week(timestamp, course_start, course_end)
users.add(user)
except ValueError as e1:
print('Warning: invalid log line {0}: {1}'.format(linecount, e1))
except Exception as e:
print('Warning: invalid log line {0}: {1}\n{2}'.format(linecount, e, line))
if user not in user_dropout_weeks.keys(): #no entry for user
if not week: #current entry is outside valid course dates; initialize entry with dropout_week = 0
user_dropout_weeks[user] = 0
else: #current entry is within valid course dates; initialize entry with dropout_week = week
user_dropout_weeks[user] = week
else: #entry already exists for user; check and update if necessary
# update entry for user if week is valid and more recent than current entry
if week and user_dropout_weeks[user] < week:
user_dropout_weeks[user] = week
linecount += 1
df_dropout = pd.DataFrame.from_dict(user_dropout_weeks, orient='index')
#rename columns; handled this way because DataFrame.from_dict doesn't support column naming directly
df_dropout.index.names = ['userID']
df_dropout.columns = ['dropout_week']
output = (users, df_dropout)
return output
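# Hedged usage sketch (the course short name, run number and file name are
# illustrative placeholders): the extractor above is pointed at one gzipped
# clickstream export per course run.
def _example_extract_dropouts():
    course_start, course_end = fetch_start_end_date('introfinance', '001')
    users, df_dropout = extract_users_dropouts('clickstream_export.gz',
                                               course_start, course_end)
    return df_dropout.head()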
def forum_line_proc(line, forumviews, linecount):
fre = re.compile('/forum/')
try:
l = json.loads(line.decode("utf-8"))
if l['key'] == 'pageview' and fre.search(l['page_url']):
forumviews.append(l)
except ValueError as e1:
print('Warning: invalid log line {0}: {1}'.format(linecount, e1))
except Exception as e:
print('Warning: invalid log line {0}: {1}\n{2}'.format(linecount, e, line))
return forumviews
def quizattempt_line_proc(line, course_start, course_end, quiz_output, linecount):
qre = re.compile('/quiz/attempt') # in 'url';avoids counting /quiz/feedback
try:
j = json.loads(line.decode("utf-8"))
user = j.get('username')
timestamp = j.get('timestamp')
week = timestamp_week(timestamp, course_start, course_end)
if week:
            # check if the access type is an assessment type, and if it is
            # then append an entry of that type to quiz_output[user][week]
if j.get('key') == 'pageview' and qre.search(j.get('page_url')):
quiz_output[user][week].append('quizzes_quiz_attempt')
except ValueError as e1:
print('Warning: invalid log line {0}: {1}'.format(linecount, e1))
except Exception as e:
print('Warning: invalid log line {0}: {1}\n{2}'.format(linecount, e, line))
return quiz_output
def extract_forum_views_and_quiz_attempts(coursera_clickstream_file, users, course_start, course_end):
"""
    Extract forum views and quiz attempts in a single pass.
:return:
"""
# initialize all data structures
n_weeks = course_len(course_start, course_end)
forum_output = {user: {n: [] for n in range(n_weeks + 1)} for user in users} # nested dict in format {user: {week: [url1, url2, url3...]}}
forumviews = []
linecount = 1
# compile regex for assessment types
quiz_output = {user: {n: [] for n in range(n_weeks + 1)} for user in users} # nested dict in format {user: {week: [accessType, accessType...]}}
# process each clickstream line, extracting any forum views, active days, or quiz views
with gzip.open(coursera_clickstream_file, 'r') as f:
for line in f:
forumviews = forum_line_proc(line, forumviews, linecount)
quiz_output = quizattempt_line_proc(line, course_start, course_end, quiz_output, linecount)
linecount += 1
# post-process data from each forum: add each forumview URL accessed to (user, week) entry in forum_output
for p in forumviews:
user = p.get('username')
url = p.get('page_url')
timestamp = p.get('timestamp')
week = timestamp_week(timestamp, course_start, course_end)
if week: # if week falls within active dates of course, add to user entry
forum_output[user][week].append(url)
forum_output_list = [(k, week, len(views)) for k, v in forum_output.items() for week, views in v.items()]
    df_forum = pd.DataFrame(data=forum_output_list, columns=['userID', 'week', 'forum_views'])
import numpy as np
import pandas as pd
import pytask
from src.config import BLD
from src.config import SRC
from src.shared import create_age_groups
from src.shared import load_dataset
LOCATIONS = [
"cnt_home",
"cnt_work",
"cnt_school",
"cnt_leisure",
"cnt_transport",
"cnt_otherplace",
]
MOSSONG_IN = SRC / "original_data" / "mossong_2008"
MOSSONG_OUT = BLD / "data" / "mossong_2008"
@pytask.mark.depends_on(
{
"hh_common": MOSSONG_IN / "hh_common.csv",
"hh_extra": MOSSONG_IN / "hh_extra.csv",
"participant_common": MOSSONG_IN / "participant_common.csv",
"participant_extra": MOSSONG_IN / "participant_extra.csv",
"contact_common": MOSSONG_IN / "contact_common.csv",
"sday": MOSSONG_IN / "sday.csv",
"eu_hh_size_shares": BLD
/ "data"
/ "population_structure"
/ "eu_hh_size_shares.pkl",
"shared.py": SRC / "shared.py",
}
)
@pytask.mark.produces(
{
"contact_data": MOSSONG_OUT / "contact_data.pkl",
"hh_sample": MOSSONG_OUT / "hh_sample_ger.csv",
"hh_probabilities": MOSSONG_OUT / "hh_probabilities.csv",
}
)
def task_prepare_mossong_data(depends_on, produces):
datasets = {
key: load_dataset(val)
for key, val in depends_on.items()
if not key.endswith(".py")
}
# clean data
hh = _prepare_hh_data(datasets["hh_common"], datasets["hh_extra"])
participants = _prepare_participant_data(
datasets["participant_common"], datasets["participant_extra"]
)
contacts = _prepare_contact_data(datasets["contact_common"])
sday = _prepare_day_data(datasets["sday"])
# contact_data
contacts = _merge_mossong_data(
contacts=contacts, participants=participants, sday=sday, hh=hh
)
contacts = _make_columns_in_contact_data_nice(contacts)
contacts = contacts[contacts["country"].isin(["LU", "DE_TOT", "BE", "NL"])]
contacts = contacts.dropna(how="any")
contacts.to_pickle(produces["contact_data"])
# household sample for initial states
hh = hh.query("country == 'DE_TOT'")
hh = _from_wide_to_long_format(hh)
hh = _drop_hh_with_missing_ages(hh)
hh.to_csv(produces["hh_sample"])
# household probability weights
hh["collapsed_hh_size"] = hh["hh_size"].where(
hh["hh_size"] <= 5, pd.Interval(5.0, np.inf)
)
sample_hh_size_shares = hh["collapsed_hh_size"].value_counts(normalize=True)
inv_prob_weights = datasets["eu_hh_size_shares"]["DE_TOT"] / sample_hh_size_shares
hh["hh_inv_prob_weights"] = hh["collapsed_hh_size"].replace(inv_prob_weights)
hh["probability"] = hh["hh_inv_prob_weights"] / hh["hh_inv_prob_weights"].sum()
hh_probs = hh[["hh_id", "probability"]]
hh_probs.to_csv(produces["hh_probabilities"])
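# Hedged sketch of the inverse-probability weighting used above, on toy data
# (the share values are illustrative): household sizes over-represented in the
# sample relative to the EU population shares receive proportionally smaller
# weights; in the task above these ratios are then mapped onto individual
# households and normalized to sampling probabilities.
def _example_inverse_probability_weights() -> pd.Series:
    eu_shares = pd.Series({1: 0.4, 2: 0.3, 3: 0.3})
    sample_shares = pd.Series({1: 0.2, 2: 0.3, 3: 0.5})
    inv_prob = eu_shares / sample_shares
    return inv_prob / inv_prob.sum()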
def _prepare_hh_data(common, extra):
common = common.copy()
common["country"] = common["country"].replace({"DE": "DE_TOT", "GB": "UK"})
hh = pd.merge(left=common, right=extra, on="hh_id")
return hh
def _prepare_participant_data(common, extra):
common = common.copy(deep=True)
extra = extra.copy(deep=True)
extra["part_occupation"].replace(
{
1: "working",
2: "retired",
3: "at home (housewife)",
4: "unemployed",
5: "fulltime education",
6: "other",
},
inplace=True,
)
missed_d = {1: 0, 2: "1-4", 3: "5-9", 4: ">10"}
rename = [
("nr_missed_to_record", "diary_missed_unsp"),
("nr_missed_to_record_physical", "diary_missed_skin"),
("nr_missed_to_record_not_physical", "diary_missed_noskin"),
]
for new, old in rename:
extra[new] = extra[old].replace(missed_d)
extra.drop(columns=[old], inplace=True)
participants = pd.merge(left=common, right=extra, on="part_id")
return participants
def _prepare_contact_data(common):
df = common.copy(deep=True)
df["frequency"] = _make_frequencies_categorical(df["frequency_multi"])
df["phys_contact"].replace({1: True, 2: False}, inplace=True)
df["duration"] = _make_durations_categorical(df["duration_multi"])
    # For contacts reported in more than one context, the order of LOCATIONS
    # determines the context to which they are attributed. This affects < 10% of contacts.
assert (df[LOCATIONS].sum(axis=1) > 1).mean() < 0.1
df["place"] = df.apply(_create_place, axis=1)
df = df.rename(columns={loc: loc[4:] for loc in LOCATIONS})
df.drop(columns=["frequency_multi", "duration_multi"], inplace=True)
return df
def _make_frequencies_categorical(sr):
rename_dict = {
1: "(almost) daily",
2: "1-2 times a week",
3: "1-2 times a month",
4: "less than once a month",
5: "never met before",
}
nice_sr = sr.replace(rename_dict)
frequencies = [
"(almost) daily",
"1-2 times a week",
"1-2 times a month",
"less than once a month",
"never met before",
]
return pd.Categorical(nice_sr, categories=frequencies, ordered=True)
def _make_durations_categorical(sr):
durations = ["<5min", "5-15min", "15min-1h", "1-4h", ">4h"]
rename_dict = {
1: "<5min",
2: "5-15min",
3: "15min-1h",
4: "1-4h",
5: ">4h",
}
nice_sr = sr.replace(rename_dict)
    return pd.Categorical(nice_sr, categories=durations, ordered=True)
# Copyright 2021 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pandas as pd
from legate import pandas as lp
n = 17
a = [str(i) * 3 for i in range(n)]
for i in range(n):
if i % 4 == 0:
a[i] = None
s = pd.Series(a)
s = s.astype(pd.StringDtype())
from lolesports_api.downloaders import downloadMeta, downloadDetails
from lolesports_api.analysis import diffPlot
import glob as _glob
import json as _json
import os as _os
import numpy as _np
import pandas as _pd
def dictToAttr(self, dict):
for key, value in dict.items():
setattr(self, key, value)
class League():
def __init__(self, leagueSlug):
self.slug = leagueSlug
queryData = downloadMeta('getLeagues')
self.data = [league for league in queryData['data']['leagues'] if league['slug'] == leagueSlug][0]
dictToAttr(self, self.data)
queryData = downloadMeta('getTournamentsForLeague', {'leagueId': self.id})
self.tournaments = queryData['data']['leagues'][0]['tournaments']
def getTournament(self, tournamentId):
tournamentData = [t for t in self.tournaments if t['id'] == tournamentId][0]
return Tournament(self.id, tournamentId, tournamentData=tournamentData)
def getTournamentBySlug(self, tournamentSlug):
tournamentData = [t for t in self.tournaments if t['slug'] == tournamentSlug][0]
return Tournament(self.id, tournamentSlug, tournamentData=tournamentData)
def download(self, folder='.', **kwargs):
folder = f'{folder}/{self.slug}'
_os.makedirs(folder, exist_ok=True)
for tournamentSlug in [t['slug'] for t in self.tournaments]:
split = self.getTournament(tournamentSlug)
split.download(folder=folder, **kwargs)
class Tournament():
def __init__(self, leagueId, tournamentId, tournamentData=[]):
self.leagueId = leagueId
self.id = tournamentId
if not tournamentData:
queryData = downloadMeta('getTournamentsForLeague', {'leagueId': leagueId})
tournamentData = [t for t in queryData['data']['leagues'][0]['tournaments'] if t['id'] == self.id][0]
self.data = tournamentData
dictToAttr(self, self.data)
queryData = downloadMeta('getCompletedEvents', {'tournamentId': self.id})
self.events = queryData['data']['schedule']['events']
def getEvent(self, eventId):
return Event(self.id, eventId)
def getEventByTeamGame(self, teamSlug, gameNum):
teamEvents = [event for event in self.events if teamSlug.lower() in \
[team['code'].lower() for team in event['match']['teams']]]
assert gameNum <= len(teamEvents), "Team hasn't played that many games!"
return self.getEvent(teamEvents[gameNum-1]['match']['id'])
def download(self, folder='.', **kwargs):
folder = f'{folder}/{self.slug}'
_os.makedirs(folder, exist_ok=True)
for eventId in [e['match']['id'] for e in self.events]:
event = self.getEvent(eventId)
event.download(folder=folder, **kwargs)
class Event():
def __init__(self, tournamentId, eventId):
self.tournamentId = tournamentId
self.id = eventId
queryData = downloadMeta('getEventDetails', {'id': eventId})
self.data = queryData['data']['event']
dictToAttr(self, self.data)
self.games = self.data['match']['games']
def getGame(self, gameId, **kwargs):
gameData = {
'winner': [team['name'] for team in self.data['match']['teams'] if team['result']['gameWins'] == 1][0],
'teams': self.data['match']['teams']
}
return Game(self.id, gameId, gameData, **kwargs)
def getGameByNum(self, gameNum, **kwargs):
gameId = [game['id'] for game in self.games if game['number'] == gameNum][0]
return self.getGame(gameId, **kwargs)
def download(self, **kwargs):
for gameId in [g['id'] for g in self.games]:
game = self.getGame(gameId)
game.download(**kwargs)
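# Hedged usage sketch (the league slug, tournament slug, team code and game
# number are illustrative and depend on what the lolesports metadata actually
# exposes); autoLoad=False avoids looking for a local game-data file.
def _example_fetch_game():
    league = League('lec')
    split = league.getTournamentBySlug('lec_summer_2020')
    event = split.getEventByTeamGame('g2', 1)
    return event.getGameByNum(1, autoLoad=False)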
class Game():
def __init__(self, eventId, gameId, gameData=[], autoLoad=True):
self.eventId = eventId
self.id = gameId
if not gameData:
queryData = downloadMeta('getEventDetails', {'id': eventId})
teamData = queryData['data']['event']['match']['teams']
gameData = {
'winner': [team['name'] for team in teamData if team['result']['gameWins'] == 1][0],
'teams': teamData
}
dictToAttr(self, gameData)
if autoLoad:
try:
self.loadData()
self.parseData()
except AssertionError:
pass
def download(self, folder='.', verbose=True, overwrite=False):
try:
fname = f'{folder}/{self.id}.json'
if (not _os.path.isfile(fname)) or (overwrite):
gameData = downloadDetails(self.id)
with open(fname, 'w') as fp:
_json.dump(gameData, fp)
if verbose:
print(f'Finished writing to {fname}')
else:
if verbose:
print(f'Already exists. Skipping {self.id}')
except Exception as e:
print(f'Failed to download {self.id}.')
print(e)
else:
self.loadData()
self.parseData()
def loadData(self):
fname = _glob.glob(f'*/*/{self.id}.json')
assert len(fname) > 0, 'No game data file found!'
assert len(fname) < 2, f'Multiple game data files found: {fname}'
with open(fname[0], 'r') as f:
gameData = _json.load(f)
self.json = gameData
self._filename = fname[0]
def parseData(self):
## Parse Time
time = _np.array([frame['rfc460Timestamp'][:-1] for frame in self.json['frames']], dtype=_np.datetime64)
# time is seconds since the SECOND timestamp. I believe the first timestamp is when they start loading in...
time = (time - time[1])/_np.timedelta64(1, 's')
pauseIdxs = [idx for idx, frame in enumerate(self.json['frames']) if frame['gameState']=='paused']
for pIdx in pauseIdxs:
time[pIdx+1:] -= time[pIdx+1] - time[pIdx]
        self._timeIndex = _pd.TimedeltaIndex(time)
'''
US Job Counts by Industry, 2006-2015
===============================
Interactive heat map comparing total US job change with job change by
industry around the 2008 stock market crash.
'''
import pandas as pd
import altair as alt
from datetime import datetime as dt
us_employment = pd.read_csv("https://raw.githubusercontent.com/vega/vega-datasets/master/data/us-employment.csv")
us_employment["date"] = | pd.to_datetime(us_employment["month"]) | pandas.to_datetime |
import SQLiteFunctions as SQL
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Create new database object and connect to database
algo_db = SQL.SqliteDatabase()
algo_db.connect_database(r'C:/Users/jnwag/OneDrive/Documents/GitHub/AlgorandGovernance/AlgoDB.db')
table_headers = ['id', 'account_id', 'address', 'committed_algo', 'is_eligible', 'reason', 'registration_date',
'session_count', 'link_count']
# Fetch governor data from db
test = algo_db.fetch_all('Governors')
# convert table data into dataframe and keep only the eligible users
df = pd.DataFrame(test, columns=table_headers)
import numpy as np
import pandas as pd
import scipy
from scipy import stats
import matplotlib as mpl
import matplotlib.pyplot as plt
from distutils.version import LooseVersion
pandas_has_categoricals = LooseVersion(pd.__version__) >= "0.15"
import nose.tools as nt
import numpy.testing as npt
from numpy.testing.decorators import skipif
from .. import distributions as dist
from .. import palettes
try:
import statsmodels.nonparametric.api
assert statsmodels.nonparametric.api
_no_statsmodels = False
except ImportError:
_no_statsmodels = True
class TestBoxPlotter(object):
"""Test boxplot (also base class for things like violinplots)."""
rs = np.random.RandomState(30)
n_total = 60
    x = rs.randn(n_total // 3, 3)
x_df = pd.DataFrame(x, columns=pd.Series(list("XYZ"), name="big"))
y = pd.Series(rs.randn(n_total), name="y_data")
    g = pd.Series(np.repeat(list("abc"), n_total // 3), name="small")
    h = pd.Series(np.tile(list("mn"), n_total // 2), name="medium")
df = pd.DataFrame(dict(y=y, g=g, h=h))
x_df["W"] = g
default_kws = dict(x=None, y=None, hue=None, data=None,
order=None, hue_order=None,
orient=None, color=None, palette=None,
saturation=.75, width=.8,
fliersize=5, linewidth=None)
def test_wide_df_data(self):
p = dist._BoxPlotter(**self.default_kws)
# Test basic wide DataFrame
p.establish_variables(data=self.x_df)
# Check data attribute
for x, y, in zip(p.plot_data, self.x_df[["X", "Y", "Z"]].values.T):
npt.assert_array_equal(x, y)
# Check semantic attributes
nt.assert_equal(p.orient, "v")
nt.assert_is(p.plot_hues, None)
nt.assert_is(p.group_label, "big")
nt.assert_is(p.value_label, None)
# Test wide dataframe with forced horizontal orientation
p.establish_variables(data=self.x_df, orient="horiz")
nt.assert_equal(p.orient, "h")
        # Test exception by trying to hue-group with a wide dataframe
with nt.assert_raises(ValueError):
p.establish_variables(hue="d", data=self.x_df)
def test_1d_input_data(self):
p = dist._BoxPlotter(**self.default_kws)
# Test basic vector data
x_1d_array = self.x.ravel()
p.establish_variables(data=x_1d_array)
nt.assert_equal(len(p.plot_data), 1)
nt.assert_equal(len(p.plot_data[0]), self.n_total)
nt.assert_is(p.group_label, None)
nt.assert_is(p.value_label, None)
# Test basic vector data in list form
x_1d_list = x_1d_array.tolist()
p.establish_variables(data=x_1d_list)
nt.assert_equal(len(p.plot_data), 1)
nt.assert_equal(len(p.plot_data[0]), self.n_total)
nt.assert_is(p.group_label, None)
nt.assert_is(p.value_label, None)
# Test an object array that looks 1D but isn't
x_notreally_1d = np.array([self.x.ravel(),
self.x.ravel()[:self.n_total / 2]])
p.establish_variables(data=x_notreally_1d)
nt.assert_equal(len(p.plot_data), 2)
nt.assert_equal(len(p.plot_data[0]), self.n_total)
nt.assert_equal(len(p.plot_data[1]), self.n_total / 2)
nt.assert_is(p.group_label, None)
nt.assert_is(p.value_label, None)
def test_2d_input_data(self):
p = dist._BoxPlotter(**self.default_kws)
x = self.x[:, 0]
# Test vector data that looks 2D but doesn't really have columns
p.establish_variables(data=x[:, np.newaxis])
nt.assert_equal(len(p.plot_data), 1)
nt.assert_equal(len(p.plot_data[0]), self.x.shape[0])
nt.assert_is(p.group_label, None)
nt.assert_is(p.value_label, None)
# Test vector data that looks 2D but doesn't really have rows
p.establish_variables(data=x[np.newaxis, :])
nt.assert_equal(len(p.plot_data), 1)
nt.assert_equal(len(p.plot_data[0]), self.x.shape[0])
nt.assert_is(p.group_label, None)
nt.assert_is(p.value_label, None)
def test_3d_input_data(self):
p = dist._BoxPlotter(**self.default_kws)
# Test that passing actually 3D data raises
x = np.zeros((5, 5, 5))
with nt.assert_raises(ValueError):
p.establish_variables(data=x)
def test_list_of_array_input_data(self):
p = dist._BoxPlotter(**self.default_kws)
# Test 2D input in list form
x_list = self.x.T.tolist()
p.establish_variables(data=x_list)
nt.assert_equal(len(p.plot_data), 3)
lengths = [len(v_i) for v_i in p.plot_data]
nt.assert_equal(lengths, [self.n_total / 3] * 3)
nt.assert_is(p.group_label, None)
nt.assert_is(p.value_label, None)
def test_wide_array_input_data(self):
p = dist._BoxPlotter(**self.default_kws)
# Test 2D input in array form
p.establish_variables(data=self.x)
nt.assert_equal(np.shape(p.plot_data), (3, self.n_total / 3))
npt.assert_array_equal(p.plot_data, self.x.T)
nt.assert_is(p.group_label, None)
nt.assert_is(p.value_label, None)
def test_single_long_direct_inputs(self):
p = dist._BoxPlotter(**self.default_kws)
# Test passing a series to the x variable
p.establish_variables(x=self.y)
npt.assert_equal(p.plot_data, [self.y])
nt.assert_equal(p.orient, "h")
nt.assert_equal(p.value_label, "y_data")
nt.assert_is(p.group_label, None)
# Test passing a series to the y variable
p.establish_variables(y=self.y)
npt.assert_equal(p.plot_data, [self.y])
nt.assert_equal(p.orient, "v")
nt.assert_equal(p.value_label, "y_data")
nt.assert_is(p.group_label, None)
# Test passing an array to the y variable
p.establish_variables(y=self.y.values)
npt.assert_equal(p.plot_data, [self.y])
nt.assert_equal(p.orient, "v")
nt.assert_is(p.value_label, None)
nt.assert_is(p.group_label, None)
def test_single_long_indirect_inputs(self):
p = dist._BoxPlotter(**self.default_kws)
# Test referencing a DataFrame series in the x variable
p.establish_variables(x="y", data=self.df)
npt.assert_equal(p.plot_data, [self.y])
nt.assert_equal(p.orient, "h")
nt.assert_equal(p.value_label, "y")
nt.assert_is(p.group_label, None)
# Test referencing a DataFrame series in the y variable
p.establish_variables(y="y", data=self.df)
npt.assert_equal(p.plot_data, [self.y])
nt.assert_equal(p.orient, "v")
nt.assert_equal(p.value_label, "y")
nt.assert_is(p.group_label, None)
def test_longform_groupby(self):
p = dist._BoxPlotter(**self.default_kws)
# Test a vertically oriented grouped and nested plot
p.establish_variables("g", "y", "h", data=self.df)
nt.assert_equal(len(p.plot_data), 3)
nt.assert_equal(len(p.plot_hues), 3)
nt.assert_equal(p.orient, "v")
nt.assert_equal(p.value_label, "y")
nt.assert_equal(p.group_label, "g")
nt.assert_equal(p.hue_title, "h")
for group, vals in zip(["a", "b", "c"], p.plot_data):
npt.assert_array_equal(vals, self.y[self.g == group])
for group, hues in zip(["a", "b", "c"], p.plot_hues):
npt.assert_array_equal(hues, self.h[self.g == group])
# Test a grouped and nested plot with direct array value data
p.establish_variables("g", self.y.values, "h", self.df)
nt.assert_is(p.value_label, None)
nt.assert_equal(p.group_label, "g")
for group, vals in zip(["a", "b", "c"], p.plot_data):
npt.assert_array_equal(vals, self.y[self.g == group])
# Test a grouped and nested plot with direct array hue data
p.establish_variables("g", "y", self.h.values, self.df)
for group, hues in zip(["a", "b", "c"], p.plot_hues):
npt.assert_array_equal(hues, self.h[self.g == group])
# Test categorical grouping data
if pandas_has_categoricals:
df = self.df.copy()
df.g = df.g.astype("category")
# Test that horizontal orientation is automatically detected
p.establish_variables("y", "g", "h", data=df)
nt.assert_equal(len(p.plot_data), 3)
nt.assert_equal(len(p.plot_hues), 3)
nt.assert_equal(p.orient, "h")
nt.assert_equal(p.value_label, "y")
nt.assert_equal(p.group_label, "g")
nt.assert_equal(p.hue_title, "h")
for group, vals in zip(["a", "b", "c"], p.plot_data):
npt.assert_array_equal(vals, self.y[self.g == group])
for group, hues in zip(["a", "b", "c"], p.plot_hues):
npt.assert_array_equal(hues, self.h[self.g == group])
def test_order(self):
p = dist._BoxPlotter(**self.default_kws)
# Test inferred order from a wide dataframe input
p.establish_variables(data=self.x_df)
nt.assert_equal(p.group_names, ["X", "Y", "Z"])
# Test specified order with a wide dataframe input
p.establish_variables(data=self.x_df, order=["Y", "Z", "X"])
nt.assert_equal(p.group_names, ["Y", "Z", "X"])
for group, vals in zip(["Y", "Z", "X"], p.plot_data):
npt.assert_array_equal(vals, self.x_df[group])
with nt.assert_raises(ValueError):
p.establish_variables(data=self.x, order=[1, 2, 0])
# Test inferred order from a grouped longform input
p.establish_variables("g", "y", data=self.df)
nt.assert_equal(p.group_names, ["a", "b", "c"])
# Test specified order from a grouped longform input
p.establish_variables("g", "y", data=self.df, order=["b", "a", "c"])
nt.assert_equal(p.group_names, ["b", "a", "c"])
for group, vals in zip(["b", "a", "c"], p.plot_data):
npt.assert_array_equal(vals, self.y[self.g == group])
# Test inferred order from a grouped input with categorical groups
if pandas_has_categoricals:
df = self.df.copy()
df.g = df.g.astype("category")
df.g = df.g.cat.reorder_categories(["c", "b", "a"])
p.establish_variables("g", "y", data=df)
nt.assert_equal(p.group_names, ["c", "b", "a"])
for group, vals in zip(["c", "b", "a"], p.plot_data):
npt.assert_array_equal(vals, self.y[self.g == group])
def test_hue_order(self):
p = dist._BoxPlotter(**self.default_kws)
# Test inferred hue order
p.establish_variables("g", "y", "h", data=self.df)
nt.assert_equal(p.hue_names, ["m", "n"])
# Test specified hue order
p.establish_variables("g", "y", "h", data=self.df,
hue_order=["n", "m"])
nt.assert_equal(p.hue_names, ["n", "m"])
# Test inferred hue order from a categorical hue input
if pandas_has_categoricals:
df = self.df.copy()
df.h = df.h.astype("category")
df.h = df.h.cat.reorder_categories(["n", "m"])
p.establish_variables("g", "y", "h", data=df)
nt.assert_equal(p.hue_names, ["n", "m"])
def test_orient_inference(self):
p = dist._BoxPlotter(**self.default_kws)
cat_series = pd.Series(["a", "b", "c"] * 10)
num_series = pd.Series(self.rs.randn(30))
x, y = cat_series, num_series
nt.assert_equal(p.infer_orient(x, y, "horiz"), "h")
nt.assert_equal(p.infer_orient(x, y, "vert"), "v")
nt.assert_equal(p.infer_orient(x, None), "h")
nt.assert_equal(p.infer_orient(None, y), "v")
nt.assert_equal(p.infer_orient(x, y), "v")
if pandas_has_categoricals:
cat_series = cat_series.astype("category")
y, x = cat_series, num_series
nt.assert_equal(p.infer_orient(x, y), "h")
def test_default_palettes(self):
p = dist._BoxPlotter(**self.default_kws)
# Test palette mapping the x position
p.establish_variables("g", "y", data=self.df)
p.establish_colors(None, None, 1)
nt.assert_equal(p.colors, palettes.color_palette("deep", 3))
# Test palette mapping the hue position
p.establish_variables("g", "y", "h", data=self.df)
p.establish_colors(None, None, 1)
nt.assert_equal(p.colors, palettes.color_palette("deep", 2))
def test_default_palette_with_many_levels(self):
with palettes.color_palette(["blue", "red"], 2):
p = dist._BoxPlotter(**self.default_kws)
p.establish_variables("g", "y", data=self.df)
p.establish_colors(None, None, 1)
npt.assert_array_equal(p.colors, palettes.husl_palette(3, l=.7))
def test_specific_color(self):
p = dist._BoxPlotter(**self.default_kws)
# Test the same color for each x position
p.establish_variables("g", "y", data=self.df)
p.establish_colors("blue", None, 1)
blue_rgb = mpl.colors.colorConverter.to_rgb("blue")
nt.assert_equal(p.colors, [blue_rgb] * 3)
# Test a color-based blend for the hue mapping
p.establish_variables("g", "y", "h", data=self.df)
p.establish_colors("#ff0022", None, 1)
rgba_array = palettes.light_palette("#ff0022", 2)
npt.assert_array_almost_equal(p.colors,
rgba_array[:, :3])
def test_specific_palette(self):
p = dist._BoxPlotter(**self.default_kws)
# Test palette mapping the x position
p.establish_variables("g", "y", data=self.df)
p.establish_colors(None, "dark", 1)
nt.assert_equal(p.colors, palettes.color_palette("dark", 3))
# Test that non-None `color` and `hue` raises an error
p.establish_variables("g", "y", "h", data=self.df)
p.establish_colors(None, "muted", 1)
nt.assert_equal(p.colors, palettes.color_palette("muted", 2))
# Test that specified palette overrides specified color
p = dist._BoxPlotter(**self.default_kws)
p.establish_variables("g", "y", data=self.df)
p.establish_colors("blue", "deep", 1)
nt.assert_equal(p.colors, palettes.color_palette("deep", 3))
def test_dict_as_palette(self):
p = dist._BoxPlotter(**self.default_kws)
p.establish_variables("g", "y", "h", data=self.df)
pal = {"m": (0, 0, 1), "n": (1, 0, 0)}
p.establish_colors(None, pal, 1)
nt.assert_equal(p.colors, [(0, 0, 1), (1, 0, 0)])
def test_palette_desaturation(self):
p = dist._BoxPlotter(**self.default_kws)
p.establish_variables("g", "y", data=self.df)
p.establish_colors((0, 0, 1), None, .5)
nt.assert_equal(p.colors, [(.25, .25, .75)] * 3)
p.establish_colors(None, [(0, 0, 1), (1, 0, 0), "w"], .5)
nt.assert_equal(p.colors, [(.25, .25, .75),
(.75, .25, .25),
(1, 1, 1)])
def test_nested_width(self):
p = dist._BoxPlotter(**self.default_kws)
p.establish_variables("g", "y", "h", data=self.df)
nt.assert_equal(p.nested_width, .4 * .98)
kws = self.default_kws.copy()
kws["width"] = .6
p = dist._BoxPlotter(**kws)
p.establish_variables("g", "y", "h", data=self.df)
nt.assert_equal(p.nested_width, .3 * .98)
def test_hue_offsets(self):
p = dist._BoxPlotter(**self.default_kws)
p.establish_variables("g", "y", "h", data=self.df)
npt.assert_array_equal(p.hue_offsets, [-.2, .2])
kws = self.default_kws.copy()
kws["width"] = .6
p = dist._BoxPlotter(**kws)
p.establish_variables("g", "y", "h", data=self.df)
npt.assert_array_equal(p.hue_offsets, [-.15, .15])
p = dist._BoxPlotter(**kws)
p.establish_variables("h", "y", "g", data=self.df)
npt.assert_array_almost_equal(p.hue_offsets, [-.2, 0, .2])
def test_axes_data(self):
ax = dist.boxplot("g", "y", data=self.df)
nt.assert_equal(len(ax.artists), 3)
plt.close("all")
ax = dist.boxplot("g", "y", "h", data=self.df)
nt.assert_equal(len(ax.artists), 6)
plt.close("all")
def test_box_colors(self):
ax = dist.boxplot("g", "y", data=self.df, saturation=1)
pal = palettes.color_palette("deep", 3)
for patch, color in zip(ax.artists, pal):
nt.assert_equal(patch.get_facecolor()[:3], color)
plt.close("all")
ax = dist.boxplot("g", "y", "h", data=self.df, saturation=1)
pal = palettes.color_palette("deep", 2)
for patch, color in zip(ax.artists, pal * 2):
nt.assert_equal(patch.get_facecolor()[:3], color)
plt.close("all")
def test_axes_annotation(self):
ax = dist.boxplot("g", "y", data=self.df)
nt.assert_equal(ax.get_xlabel(), "g")
nt.assert_equal(ax.get_ylabel(), "y")
nt.assert_equal(ax.get_xlim(), (-.5, 2.5))
npt.assert_array_equal(ax.get_xticks(), [0, 1, 2])
npt.assert_array_equal([l.get_text() for l in ax.get_xticklabels()],
["a", "b", "c"])
plt.close("all")
ax = dist.boxplot("g", "y", "h", data=self.df)
nt.assert_equal(ax.get_xlabel(), "g")
nt.assert_equal(ax.get_ylabel(), "y")
npt.assert_array_equal(ax.get_xticks(), [0, 1, 2])
npt.assert_array_equal([l.get_text() for l in ax.get_xticklabels()],
["a", "b", "c"])
npt.assert_array_equal([l.get_text() for l in ax.legend_.get_texts()],
["m", "n"])
plt.close("all")
ax = dist.boxplot("y", "g", data=self.df, orient="h")
nt.assert_equal(ax.get_xlabel(), "y")
nt.assert_equal(ax.get_ylabel(), "g")
nt.assert_equal(ax.get_ylim(), (2.5, -.5))
npt.assert_array_equal(ax.get_yticks(), [0, 1, 2])
npt.assert_array_equal([l.get_text() for l in ax.get_yticklabels()],
["a", "b", "c"])
plt.close("all")
class TestViolinPlotter(object):
"""Test violinplots."""
rs = np.random.RandomState(30)
n_total = 60
    x = rs.randn(n_total // 3, 3)
x_df = pd.DataFrame(x, columns=pd.Series(list("XYZ"), name="big"))
y = pd.Series(rs.randn(n_total), name="y_data")
g = pd.Series(np.repeat(list("abc"), n_total / 3), name="small")
h = pd.Series(np.tile(list("mn"), n_total / 2), name="medium")
df = pd.DataFrame(dict(y=y, g=g, h=h))
x_df["W"] = g
default_kws = dict(x=None, y=None, hue=None, data=None,
order=None, hue_order=None,
bw="scott", cut=2, scale="area", scale_hue=True,
gridsize=100, width=.8, inner="box", split=False,
orient=None, linewidth=None,
color=None, palette=None, saturation=.75)
def test_split_error(self):
kws = self.default_kws.copy()
kws.update(dict(x="h", y="y", hue="g", data=self.df, split=True))
with nt.assert_raises(ValueError):
dist._ViolinPlotter(**kws)
def test_no_observations(self):
p = dist._ViolinPlotter(**self.default_kws)
x = ["a", "a", "b"]
y = self.rs.randn(3)
y[-1] = np.nan
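        # Group "b" holds only a missing value, so it gets empty support and a
        # placeholder density of 1 under "area" scaling (0 under "count")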
p.establish_variables(x, y)
p.estimate_densities("scott", 2, "area", True, 20)
nt.assert_equal(len(p.support[0]), 20)
nt.assert_equal(len(p.support[1]), 0)
nt.assert_equal(len(p.density[0]), 20)
nt.assert_equal(len(p.density[1]), 1)
nt.assert_equal(p.density[1].item(), 1)
p.estimate_densities("scott", 2, "count", True, 20)
nt.assert_equal(p.density[1].item(), 0)
x = ["a"] * 4 + ["b"] * 2
y = self.rs.randn(6)
h = ["m", "n"] * 2 + ["m"] * 2
p.establish_variables(x, y, h)
p.estimate_densities("scott", 2, "area", True, 20)
nt.assert_equal(len(p.support[1][0]), 20)
nt.assert_equal(len(p.support[1][1]), 0)
nt.assert_equal(len(p.density[1][0]), 20)
nt.assert_equal(len(p.density[1][1]), 1)
nt.assert_equal(p.density[1][1].item(), 1)
p.estimate_densities("scott", 2, "count", False, 20)
nt.assert_equal(p.density[1][1].item(), 0)
def test_single_observation(self):
p = dist._ViolinPlotter(**self.default_kws)
x = ["a", "a", "b"]
y = self.rs.randn(3)
p.establish_variables(x, y)
p.estimate_densities("scott", 2, "area", True, 20)
nt.assert_equal(len(p.support[0]), 20)
nt.assert_equal(len(p.support[1]), 1)
nt.assert_equal(len(p.density[0]), 20)
nt.assert_equal(len(p.density[1]), 1)
nt.assert_equal(p.density[1].item(), 1)
p.estimate_densities("scott", 2, "count", True, 20)
nt.assert_equal(p.density[1].item(), .5)
x = ["b"] * 4 + ["a"] * 3
y = self.rs.randn(7)
h = (["m", "n"] * 4)[:-1]
p.establish_variables(x, y, h)
p.estimate_densities("scott", 2, "area", True, 20)
nt.assert_equal(len(p.support[1][0]), 20)
nt.assert_equal(len(p.support[1][1]), 1)
nt.assert_equal(len(p.density[1][0]), 20)
nt.assert_equal(len(p.density[1][1]), 1)
nt.assert_equal(p.density[1][1].item(), 1)
p.estimate_densities("scott", 2, "count", False, 20)
nt.assert_equal(p.density[1][1].item(), .5)
def test_dwidth(self):
kws = self.default_kws.copy()
kws.update(dict(x="g", y="y", data=self.df))
p = dist._ViolinPlotter(**kws)
nt.assert_equal(p.dwidth, .4)
kws.update(dict(width=.4))
p = dist._ViolinPlotter(**kws)
nt.assert_equal(p.dwidth, .2)
kws.update(dict(hue="h", width=.8))
p = dist._ViolinPlotter(**kws)
nt.assert_equal(p.dwidth, .2)
kws.update(dict(split=True))
p = dist._ViolinPlotter(**kws)
nt.assert_equal(p.dwidth, .4)
def test_scale_area(self):
kws = self.default_kws.copy()
kws["scale"] = "area"
p = dist._ViolinPlotter(**kws)
# Test single layer of grouping
p.hue_names = None
density = [self.rs.uniform(0, .8, 50), self.rs.uniform(0, .2, 50)]
max_before = np.array([d.max() for d in density])
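        # Area scaling should make the largest peak 1 while preserving relative peak ratios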
p.scale_area(density, max_before, False)
max_after = np.array([d.max() for d in density])
nt.assert_equal(max_after[0], 1)
before_ratio = max_before[1] / max_before[0]
after_ratio = max_after[1] / max_after[0]
nt.assert_equal(before_ratio, after_ratio)
# Test nested grouping scaling across all densities
p.hue_names = ["foo", "bar"]
density = [[self.rs.uniform(0, .8, 50), self.rs.uniform(0, .2, 50)],
[self.rs.uniform(0, .1, 50), self.rs.uniform(0, .02, 50)]]
max_before = np.array([[r.max() for r in row] for row in density])
p.scale_area(density, max_before, False)
max_after = np.array([[r.max() for r in row] for row in density])
nt.assert_equal(max_after[0, 0], 1)
before_ratio = max_before[1, 1] / max_before[0, 0]
after_ratio = max_after[1, 1] / max_after[0, 0]
nt.assert_equal(before_ratio, after_ratio)
# Test nested grouping scaling within hue
p.hue_names = ["foo", "bar"]
density = [[self.rs.uniform(0, .8, 50), self.rs.uniform(0, .2, 50)],
[self.rs.uniform(0, .1, 50), self.rs.uniform(0, .02, 50)]]
max_before = np.array([[r.max() for r in row] for row in density])
p.scale_area(density, max_before, True)
max_after = np.array([[r.max() for r in row] for row in density])
nt.assert_equal(max_after[0, 0], 1)
nt.assert_equal(max_after[1, 0], 1)
before_ratio = max_before[1, 1] / max_before[1, 0]
after_ratio = max_after[1, 1] / max_after[1, 0]
nt.assert_equal(before_ratio, after_ratio)
def test_scale_width(self):
kws = self.default_kws.copy()
kws["scale"] = "width"
p = dist._ViolinPlotter(**kws)
# Test single layer of grouping
p.hue_names = None
density = [self.rs.uniform(0, .8, 50), self.rs.uniform(0, .2, 50)]
p.scale_width(density)
max_after = np.array([d.max() for d in density])
npt.assert_array_equal(max_after, [1, 1])
# Test nested grouping
p.hue_names = ["foo", "bar"]
density = [[self.rs.uniform(0, .8, 50), self.rs.uniform(0, .2, 50)],
[self.rs.uniform(0, .1, 50), self.rs.uniform(0, .02, 50)]]
p.scale_width(density)
max_after = np.array([[r.max() for r in row] for row in density])
npt.assert_array_equal(max_after, [[1, 1], [1, 1]])
def test_scale_count(self):
kws = self.default_kws.copy()
kws["scale"] = "count"
p = dist._ViolinPlotter(**kws)
# Test single layer of grouping
p.hue_names = None
density = [self.rs.uniform(0, .8, 20), self.rs.uniform(0, .2, 40)]
counts = np.array([20, 40])
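        # Count scaling makes widths proportional to group size relative to the largest group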
p.scale_count(density, counts, False)
max_after = np.array([d.max() for d in density])
npt.assert_array_equal(max_after, [.5, 1])
# Test nested grouping scaling across all densities
p.hue_names = ["foo", "bar"]
density = [[self.rs.uniform(0, .8, 5), self.rs.uniform(0, .2, 40)],
[self.rs.uniform(0, .1, 100), self.rs.uniform(0, .02, 50)]]
counts = np.array([[5, 40], [100, 50]])
p.scale_count(density, counts, False)
max_after = np.array([[r.max() for r in row] for row in density])
npt.assert_array_equal(max_after, [[.05, .4], [1, .5]])
# Test nested grouping scaling within hue
p.hue_names = ["foo", "bar"]
density = [[self.rs.uniform(0, .8, 5), self.rs.uniform(0, .2, 40)],
[self.rs.uniform(0, .1, 100), self.rs.uniform(0, .02, 50)]]
counts = np.array([[5, 40], [100, 50]])
p.scale_count(density, counts, True)
max_after = np.array([[r.max() for r in row] for row in density])
npt.assert_array_equal(max_after, [[.125, 1], [1, .5]])
def test_bad_scale(self):
kws = self.default_kws.copy()
kws["scale"] = "not_a_scale_type"
with nt.assert_raises(ValueError):
dist._ViolinPlotter(**kws)
def test_kde_fit(self):
p = dist._ViolinPlotter(**self.default_kws)
data = self.y
data_std = data.std(ddof=1)
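        # fit_kde returns the KDE object plus a numeric bandwidth of factor * sample std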
# Bandwidth behavior depends on scipy version
if LooseVersion(scipy.__version__) < "0.11":
# Test ignoring custom bandwidth on old scipy
kde, bw = p.fit_kde(self.y, .2)
nt.assert_is_instance(kde, stats.gaussian_kde)
nt.assert_equal(kde.factor, kde.scotts_factor())
else:
# Test reference rule bandwidth
kde, bw = p.fit_kde(data, "scott")
nt.assert_is_instance(kde, stats.gaussian_kde)
nt.assert_equal(kde.factor, kde.scotts_factor())
nt.assert_equal(bw, kde.scotts_factor() * data_std)
# Test numeric scale factor
kde, bw = p.fit_kde(self.y, .2)
nt.assert_is_instance(kde, stats.gaussian_kde)
nt.assert_equal(kde.factor, .2)
nt.assert_equal(bw, .2 * data_std)
def test_draw_to_density(self):
p = dist._ViolinPlotter(**self.default_kws)
# p.dwidth will be 1 for easier testing
p.width = 2
        # Test vertical plots
support = np.array([.2, .6])
density = np.array([.1, .4])
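        # The drawn line spans dwidth * density at the support point nearest the value,
        # shrunk by the .99 fudge factor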
# Test full vertical plot
_, ax = plt.subplots()
p.draw_to_density(ax, 0, .5, support, density, False)
x, y = ax.lines[0].get_xydata().T
npt.assert_array_equal(x, [.99 * -.4, .99 * .4])
npt.assert_array_equal(y, [.5, .5])
plt.close("all")
# Test left vertical plot
_, ax = plt.subplots()
p.draw_to_density(ax, 0, .5, support, density, "left")
x, y = ax.lines[0].get_xydata().T
npt.assert_array_equal(x, [.99 * -.4, 0])
npt.assert_array_equal(y, [.5, .5])
plt.close("all")
# Test right vertical plot
_, ax = plt.subplots()
p.draw_to_density(ax, 0, .5, support, density, "right")
x, y = ax.lines[0].get_xydata().T
npt.assert_array_equal(x, [0, .99 * .4])
npt.assert_array_equal(y, [.5, .5])
plt.close("all")
# Switch orientation to test horizontal plots
p.orient = "h"
support = np.array([.2, .5])
density = np.array([.3, .7])
# Test full horizontal plot
_, ax = plt.subplots()
p.draw_to_density(ax, 0, .6, support, density, False)
x, y = ax.lines[0].get_xydata().T
npt.assert_array_equal(x, [.6, .6])
npt.assert_array_equal(y, [.99 * -.7, .99 * .7])
plt.close("all")
# Test left horizontal plot
_, ax = plt.subplots()
p.draw_to_density(ax, 0, .6, support, density, "left")
x, y = ax.lines[0].get_xydata().T
npt.assert_array_equal(x, [.6, .6])
npt.assert_array_equal(y, [.99 * -.7, 0])
plt.close("all")
# Test right horizontal plot
_, ax = plt.subplots()
p.draw_to_density(ax, 0, .6, support, density, "right")
x, y = ax.lines[0].get_xydata().T
npt.assert_array_equal(x, [.6, .6])
npt.assert_array_equal(y, [0, .99 * .7])
plt.close("all")
def test_draw_single_observations(self):
p = dist._ViolinPlotter(**self.default_kws)
p.width = 2
# Test vertical plot
_, ax = plt.subplots()
p.draw_single_observation(ax, 1, 1.5, 1)
x, y = ax.lines[0].get_xydata().T
npt.assert_array_equal(x, [0, 2])
npt.assert_array_equal(y, [1.5, 1.5])
plt.close("all")
# Test horizontal plot
p.orient = "h"
_, ax = plt.subplots()
p.draw_single_observation(ax, 2, 2.2, .5)
x, y = ax.lines[0].get_xydata().T
npt.assert_array_equal(x, [2.2, 2.2])
npt.assert_array_equal(y, [1.5, 2.5])
plt.close("all")
def test_draw_box_lines(self):
# Test vertical plot
kws = self.default_kws.copy()
kws.update(dict(y="y", data=self.df, inner=None))
p = dist._ViolinPlotter(**kws)
_, ax = plt.subplots()
p.draw_box_lines(ax, self.y, p.support[0], p.density[0], 0)
nt.assert_equal(len(ax.lines), 2)
q25, q50, q75 = np.percentile(self.y, [25, 50, 75])
_, y = ax.lines[1].get_xydata().T
npt.assert_array_equal(y, [q25, q75])
_, y = ax.collections[0].get_offsets().T
nt.assert_equal(y, q50)
plt.close("all")
# Test horizontal plot
kws = self.default_kws.copy()
kws.update(dict(x="y", data=self.df, inner=None))
p = dist._ViolinPlotter(**kws)
_, ax = plt.subplots()
p.draw_box_lines(ax, self.y, p.support[0], p.density[0], 0)
nt.assert_equal(len(ax.lines), 2)
q25, q50, q75 = np.percentile(self.y, [25, 50, 75])
x, _ = ax.lines[1].get_xydata().T
npt.assert_array_equal(x, [q25, q75])
x, _ = ax.collections[0].get_offsets().T
nt.assert_equal(x, q50)
plt.close("all")
def test_draw_quartiles(self):
kws = self.default_kws.copy()
kws.update(dict(y="y", data=self.df, inner=None))
p = dist._ViolinPlotter(**kws)
_, ax = plt.subplots()
p.draw_quartiles(ax, self.y, p.support[0], p.density[0], 0)
for val, line in zip(np.percentile(self.y, [25, 50, 75]), ax.lines):
_, y = line.get_xydata().T
npt.assert_array_equal(y, [val, val])
plt.close("all")
def test_draw_points(self):
p = dist._ViolinPlotter(**self.default_kws)
# Test vertical plot
_, ax = plt.subplots()
p.draw_points(ax, self.y, 0)
x, y = ax.collections[0].get_offsets().T
npt.assert_array_equal(x, np.zeros_like(self.y))
npt.assert_array_equal(y, self.y)
plt.close("all")
# Test horizontal plot
p.orient = "h"
_, ax = plt.subplots()
p.draw_points(ax, self.y, 0)
x, y = ax.collections[0].get_offsets().T
npt.assert_array_equal(x, self.y)
npt.assert_array_equal(y, np.zeros_like(self.y))
plt.close("all")
def test_draw_sticks(self):
kws = self.default_kws.copy()
kws.update(dict(y="y", data=self.df, inner=None))
p = dist._ViolinPlotter(**kws)
# Test vertical plot
_, ax = plt.subplots()
p.draw_stick_lines(ax, self.y, p.support[0], p.density[0], 0)
for val, line in zip(self.y, ax.lines):
_, y = line.get_xydata().T
npt.assert_array_equal(y, [val, val])
plt.close("all")
# Test horizontal plot
p.orient = "h"
_, ax = plt.subplots()
p.draw_stick_lines(ax, self.y, p.support[0], p.density[0], 0)
for val, line in zip(self.y, ax.lines):
x, _ = line.get_xydata().T
npt.assert_array_equal(x, [val, val])
plt.close("all")
def test_draw_violinplots(self):
kws = self.default_kws.copy()
# Test single vertical violin
kws.update(dict(y="y", data=self.df, inner=None,
saturation=1, color=(1, 0, 0, 1)))
p = dist._ViolinPlotter(**kws)
_, ax = plt.subplots()
p.draw_violins(ax)
nt.assert_equal(len(ax.collections), 1)
npt.assert_array_equal(ax.collections[0].get_facecolors(),
[(1, 0, 0, 1)])
plt.close("all")
# Test single horizontal violin
kws.update(dict(x="y", y=None, color=(0, 1, 0, 1)))
p = dist._ViolinPlotter(**kws)
_, ax = plt.subplots()
p.draw_violins(ax)
nt.assert_equal(len(ax.collections), 1)
npt.assert_array_equal(ax.collections[0].get_facecolors(),
[(0, 1, 0, 1)])
plt.close("all")
# Test multiple vertical violins
kws.update(dict(x="g", y="y", color=None,))
p = dist._ViolinPlotter(**kws)
_, ax = plt.subplots()
p.draw_violins(ax)
nt.assert_equal(len(ax.collections), 3)
for violin, color in zip(ax.collections, palettes.color_palette()):
npt.assert_array_equal(violin.get_facecolors()[0, :-1], color)
plt.close("all")
# Test multiple violins with hue nesting
kws.update(dict(hue="h"))
p = dist._ViolinPlotter(**kws)
_, ax = plt.subplots()
p.draw_violins(ax)
nt.assert_equal(len(ax.collections), 6)
for violin, color in zip(ax.collections,
palettes.color_palette(n_colors=2) * 3):
npt.assert_array_equal(violin.get_facecolors()[0, :-1], color)
plt.close("all")
# Test multiple split violins
kws.update(dict(split=True, palette="muted"))
p = dist._ViolinPlotter(**kws)
_, ax = plt.subplots()
p.draw_violins(ax)
nt.assert_equal(len(ax.collections), 6)
for violin, color in zip(ax.collections,
palettes.color_palette("muted",
n_colors=2) * 3):
npt.assert_array_equal(violin.get_facecolors()[0, :-1], color)
plt.close("all")
def test_draw_violinplots_no_observations(self):
kws = self.default_kws.copy()
kws["inner"] = None
# Test single layer of grouping
x = ["a", "a", "b"]
y = self.rs.randn(3)
y[-1] = np.nan
kws.update(x=x, y=y)
p = dist._ViolinPlotter(**kws)
_, ax = plt.subplots()
p.draw_violins(ax)
nt.assert_equal(len(ax.collections), 1)
nt.assert_equal(len(ax.lines), 0)
plt.close("all")
# Test nested hue grouping
x = ["a"] * 4 + ["b"] * 2
y = self.rs.randn(6)
h = ["m", "n"] * 2 + ["m"] * 2
kws.update(x=x, y=y, hue=h)
p = dist._ViolinPlotter(**kws)
_, ax = plt.subplots()
p.draw_violins(ax)
nt.assert_equal(len(ax.collections), 3)
nt.assert_equal(len(ax.lines), 0)
plt.close("all")
def test_draw_violinplots_single_observations(self):
kws = self.default_kws.copy()
kws["inner"] = None
# Test single layer of grouping
x = ["a", "a", "b"]
y = self.rs.randn(3)
kws.update(x=x, y=y)
p = dist._ViolinPlotter(**kws)
_, ax = plt.subplots()
p.draw_violins(ax)
nt.assert_equal(len(ax.collections), 1)
nt.assert_equal(len(ax.lines), 1)
plt.close("all")
# Test nested hue grouping
x = ["b"] * 4 + ["a"] * 3
y = self.rs.randn(7)
h = (["m", "n"] * 4)[:-1]
kws.update(x=x, y=y, hue=h)
p = dist._ViolinPlotter(**kws)
_, ax = plt.subplots()
p.draw_violins(ax)
nt.assert_equal(len(ax.collections), 3)
nt.assert_equal(len(ax.lines), 1)
plt.close("all")
# Test nested hue grouping with split
kws["split"] = True
p = dist._ViolinPlotter(**kws)
_, ax = plt.subplots()
p.draw_violins(ax)
nt.assert_equal(len(ax.collections), 3)
nt.assert_equal(len(ax.lines), 1)
plt.close("all")
def test_violinplots(self):
# Smoke test the high level violinplot options
dist.violinplot("y", data=self.df)
plt.close("all")
dist.violinplot(y="y", data=self.df)
plt.close("all")
dist.violinplot("g", "y", data=self.df)
plt.close("all")
dist.violinplot("y", "g", data=self.df, orient="h")
plt.close("all")
dist.violinplot("g", "y", "h", data=self.df)
plt.close("all")
dist.violinplot("y", "g", "h", data=self.df, orient="h")
plt.close("all")
for inner in ["box", "quart", "point", "stick", None]:
dist.violinplot("g", "y", data=self.df, inner=inner)
plt.close("all")
dist.violinplot("g", "y", "h", data=self.df, inner=inner)
plt.close("all")
dist.violinplot("g", "y", "h", data=self.df,
inner=inner, split=True)
plt.close("all")
class TestStripPlotter(object):
"""Test boxplot (also base class for things like violinplots)."""
rs = np.random.RandomState(30)
n_total = 60
y = pd.Series(rs.randn(n_total), name="y_data")
g = pd.Series(np.repeat(list("abc"), n_total / 3), name="small")
h = pd.Series(np.tile(list("mn"), n_total / 2), name="medium")
df = pd.DataFrame(dict(y=y, g=g, h=h))
def test_stripplot_vertical(self):
pal = palettes.color_palette()
ax = dist.stripplot("g", "y", data=self.df)
for i, (_, vals) in enumerate(self.y.groupby(self.g)):
x, y = ax.collections[i].get_offsets().T
npt.assert_array_equal(x, np.ones(len(x)) * i)
npt.assert_array_equal(y, vals)
npt.assert_equal(ax.collections[i].get_facecolors()[0, :3], pal[i])
plt.close("all")
@skipif(not pandas_has_categoricals)
    def test_stripplot_horizontal(self):
df = self.df.copy()
df.g = df.g.astype("category")
ax = dist.stripplot("y", "g", data=df)
for i, (_, vals) in enumerate(self.y.groupby(self.g)):
x, y = ax.collections[i].get_offsets().T
npt.assert_array_equal(x, vals)
npt.assert_array_equal(y, np.ones(len(x)) * i)
plt.close("all")
def test_stripplot_jitter(self):
pal = palettes.color_palette()
ax = dist.stripplot("g", "y", data=self.df, jitter=True)
for i, (_, vals) in enumerate(self.y.groupby(self.g)):
x, y = ax.collections[i].get_offsets().T
npt.assert_array_less(np.ones(len(x)) * i - .1, x)
npt.assert_array_less(x, np.ones(len(x)) * i + .1)
npt.assert_array_equal(y, vals)
npt.assert_equal(ax.collections[i].get_facecolors()[0, :3], pal[i])
plt.close("all")
def test_split_nested_stripplot_vertical(self):
pal = palettes.color_palette()
ax = dist.stripplot("g", "y", "h", data=self.df)
for i, (_, group_vals) in enumerate(self.y.groupby(self.g)):
for j, (_, vals) in enumerate(group_vals.groupby(self.h)):
x, y = ax.collections[i * 2 + j].get_offsets().T
npt.assert_array_equal(x, np.ones(len(x)) * i + [-.2, .2][j])
npt.assert_array_equal(y, vals)
fc = ax.collections[i * 2 + j].get_facecolors()[0, :3]
npt.assert_equal(fc, pal[j])
plt.close("all")
@skipif(not pandas_has_categoricals)
def test_split_nested_stripplot_horizontal(self):
df = self.df.copy()
df.g = df.g.astype("category")
ax = dist.stripplot("y", "g", "h", data=df)
for i, (_, group_vals) in enumerate(self.y.groupby(self.g)):
for j, (_, vals) in enumerate(group_vals.groupby(self.h)):
x, y = ax.collections[i * 2 + j].get_offsets().T
npt.assert_array_equal(x, vals)
npt.assert_array_equal(y, np.ones(len(x)) * i + [-.2, .2][j])
plt.close("all")
def test_unsplit_nested_stripplot_vertical(self):
pal = palettes.color_palette()
# Test a simple vertical strip plot
ax = dist.stripplot("g", "y", "h", data=self.df, split=False)
for i, (_, group_vals) in enumerate(self.y.groupby(self.g)):
for j, (_, vals) in enumerate(group_vals.groupby(self.h)):
x, y = ax.collections[i * 2 + j].get_offsets().T
npt.assert_array_equal(x, np.ones(len(x)) * i)
npt.assert_array_equal(y, vals)
fc = ax.collections[i * 2 + j].get_facecolors()[0, :3]
npt.assert_equal(fc, pal[j])
plt.close("all")
@skipif(not pandas_has_categoricals)
def test_unsplit_nested_stripplot_horizontal(self):
df = self.df.copy()
df.g = df.g.astype("category")
ax = dist.stripplot("y", "g", "h", data=df, split=False)
for i, (_, group_vals) in enumerate(self.y.groupby(self.g)):
for j, (_, vals) in enumerate(group_vals.groupby(self.h)):
x, y = ax.collections[i * 2 + j].get_offsets().T
npt.assert_array_equal(x, vals)
npt.assert_array_equal(y, np.ones(len(x)) * i)
plt.close("all")
class TestKDE(object):
rs = np.random.RandomState(0)
x = rs.randn(50)
y = rs.randn(50)
kernel = "gau"
bw = "scott"
gridsize = 128
clip = (-np.inf, np.inf)
cut = 3
def test_scipy_univariate_kde(self):
"""Test the univariate KDE estimation with scipy."""
grid, y = dist._scipy_univariate_kde(self.x, self.bw, self.gridsize,
self.cut, self.clip)
nt.assert_equal(len(grid), self.gridsize)
nt.assert_equal(len(y), self.gridsize)
for bw in ["silverman", .2]:
dist._scipy_univariate_kde(self.x, bw, self.gridsize,
self.cut, self.clip)
@skipif(_no_statsmodels)
def test_statsmodels_univariate_kde(self):
"""Test the univariate KDE estimation with statsmodels."""
grid, y = dist._statsmodels_univariate_kde(self.x, self.kernel,
self.bw, self.gridsize,
self.cut, self.clip)
nt.assert_equal(len(grid), self.gridsize)
nt.assert_equal(len(y), self.gridsize)
for bw in ["silverman", .2]:
dist._statsmodels_univariate_kde(self.x, self.kernel, bw,
self.gridsize, self.cut,
self.clip)
def test_scipy_bivariate_kde(self):
"""Test the bivariate KDE estimation with scipy."""
clip = [self.clip, self.clip]
x, y, z = dist._scipy_bivariate_kde(self.x, self.y, self.bw,
self.gridsize, self.cut, clip)
nt.assert_equal(x.shape, (self.gridsize, self.gridsize))
nt.assert_equal(y.shape, (self.gridsize, self.gridsize))
nt.assert_equal(len(z), self.gridsize)
# Test a specific bandwidth
clip = [self.clip, self.clip]
x, y, z = dist._scipy_bivariate_kde(self.x, self.y, 1,
self.gridsize, self.cut, clip)
# Test that we get an error with an invalid bandwidth
with nt.assert_raises(ValueError):
dist._scipy_bivariate_kde(self.x, self.y, (1, 2),
self.gridsize, self.cut, clip)
@skipif(_no_statsmodels)
def test_statsmodels_bivariate_kde(self):
"""Test the bivariate KDE estimation with statsmodels."""
clip = [self.clip, self.clip]
x, y, z = dist._statsmodels_bivariate_kde(self.x, self.y, self.bw,
self.gridsize,
self.cut, clip)
nt.assert_equal(x.shape, (self.gridsize, self.gridsize))
nt.assert_equal(y.shape, (self.gridsize, self.gridsize))
nt.assert_equal(len(z), self.gridsize)
@skipif(_no_statsmodels)
def test_statsmodels_kde_cumulative(self):
"""Test computation of cumulative KDE."""
grid, y = dist._statsmodels_univariate_kde(self.x, self.kernel,
self.bw, self.gridsize,
self.cut, self.clip,
cumulative=True)
nt.assert_equal(len(grid), self.gridsize)
nt.assert_equal(len(y), self.gridsize)
# make sure y is monotonically increasing
npt.assert_((np.diff(y) > 0).all())
    def test_kde_cumulative_2d(self):
"""Check error if args indicate bivariate KDE and cumulative."""
with npt.assert_raises(TypeError):
dist.kdeplot(self.x, data2=self.y, cumulative=True)
def test_bivariate_kde_series(self):
df = | pd.DataFrame({'x': self.x, 'y': self.y}) | pandas.DataFrame |
from distutils.version import LooseVersion
from warnings import catch_warnings
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
import pandas as pd
from pandas import (
DataFrame,
HDFStore,
Index,
MultiIndex,
Series,
_testing as tm,
bdate_range,
concat,
date_range,
isna,
read_hdf,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
tables,
)
from pandas.io.pytables import Term
pytestmark = pytest.mark.single
def test_select_columns_in_where(setup_path):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo_name", "bar_name"],
)
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
expected = df[["A"]]
tm.assert_frame_equal(store.select("df", columns=["A"]), expected)
tm.assert_frame_equal(store.select("df", where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index, name="A")
with ensure_clean_store(setup_path) as store:
store.put("s", s, format="table")
tm.assert_series_equal(store.select("s", where="columns=['A']"), s)
def test_select_with_dups(setup_path):
# single dtypes
df = DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"])
df.index = date_range("20130101 9:30", periods=10, freq="T")
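    # Frames with duplicate column labels should round-trip through append/select unchanged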
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=["A"])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# dups across dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["A"]]
result = store.select("df", columns=["A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["B", "A"]]
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
# duplicates on both index and columns
with ensure_clean_store(setup_path) as store:
store.append("df", df)
store.append("df", df)
expected = df.loc[:, ["B", "A"]]
expected = concat([expected, expected])
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
def test_select(setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
# select with columns=
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df")
store.append("df", df)
result = store.select("df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# equivalently
result = store.select("df", [("columns=['A', 'B']")])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
            # all columns as data columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column, but different columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["C", "D"])
expected = df[df.A > 0].reindex(columns=["C", "D"])
| tm.assert_frame_equal(expected, result) | pandas._testing.assert_frame_equal |
import pandas as pd
import sys
import utils
import config
nrows = None
tr = utils.load_df(config.data + 'train.csv', nrows=nrows)
te = utils.load_df(config.data + 'test.csv', nrows=nrows)
actions = ['interaction item image','interaction item info','interaction item deals','interaction item rating','search for item']
df = | pd.concat([tr,te]) | pandas.concat |
# pylint: disable=E1101,E1103,W0232
from datetime import datetime, timedelta
from pandas.compat import range, lrange, lzip, u, zip
import operator
import re
import nose
import warnings
import os
import numpy as np
from numpy.testing import assert_array_equal
from pandas import period_range, date_range
from pandas.core.index import (Index, Float64Index, Int64Index, MultiIndex,
InvalidIndexError, NumericIndex)
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.tdi import TimedeltaIndex
from pandas.tseries.period import PeriodIndex
from pandas.core.series import Series
from pandas.util.testing import (assert_almost_equal, assertRaisesRegexp,
assert_copy)
from pandas import compat
from pandas.compat import long
import pandas.util.testing as tm
import pandas.core.config as cf
from pandas.tseries.index import _to_m8
import pandas.tseries.offsets as offsets
import pandas as pd
from pandas.lib import Timestamp
class Base(object):
""" base class for index sub-class tests """
_holder = None
_compat_props = ['shape', 'ndim', 'size', 'itemsize', 'nbytes']
    def verify_pickle(self, index):
unpickled = self.round_trip_pickle(index)
self.assertTrue(index.equals(unpickled))
def test_pickle_compat_construction(self):
# this is testing for pickle compat
if self._holder is None:
return
# need an object to create with
self.assertRaises(TypeError, self._holder)
def test_numeric_compat(self):
idx = self.create_index()
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : idx * 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : 1 * idx)
div_err = "cannot perform __truediv__" if compat.PY3 else "cannot perform __div__"
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : idx / 1)
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : 1 / idx)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : idx // 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : 1 // idx)
def test_boolean_context_compat(self):
# boolean context compat
idx = self.create_index()
def f():
if idx:
pass
tm.assertRaisesRegexp(ValueError,'The truth value of a',f)
def test_ndarray_compat_properties(self):
idx = self.create_index()
self.assertTrue(idx.T.equals(idx))
self.assertTrue(idx.transpose().equals(idx))
values = idx.values
for prop in self._compat_props:
self.assertEqual(getattr(idx, prop), getattr(values, prop))
# test for validity
idx.nbytes
idx.values.nbytes
class TestIndex(Base, tm.TestCase):
_holder = Index
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(
unicodeIndex = tm.makeUnicodeIndex(100),
strIndex = tm.makeStringIndex(100),
dateIndex = tm.makeDateIndex(100),
intIndex = tm.makeIntIndex(100),
floatIndex = tm.makeFloatIndex(100),
boolIndex = Index([True,False]),
empty = Index([]),
tuples = MultiIndex.from_tuples(lzip(['foo', 'bar', 'baz'],
[1, 2, 3]))
)
for name, ind in self.indices.items():
setattr(self, name, ind)
def create_index(self):
return Index(list('abcde'))
def test_wrong_number_names(self):
def testit(ind):
ind.names = ["apple", "banana", "carrot"]
for ind in self.indices.values():
assertRaisesRegexp(ValueError, "^Length", testit, ind)
def test_set_name_methods(self):
new_name = "This is the new name for this index"
indices = (self.dateIndex, self.intIndex, self.unicodeIndex,
self.empty)
for ind in indices:
original_name = ind.name
new_ind = ind.set_names([new_name])
self.assertEqual(new_ind.name, new_name)
self.assertEqual(ind.name, original_name)
res = ind.rename(new_name, inplace=True)
# should return None
self.assertIsNone(res)
self.assertEqual(ind.name, new_name)
self.assertEqual(ind.names, [new_name])
#with assertRaisesRegexp(TypeError, "list-like"):
# # should still fail even if it would be the right length
# ind.set_names("a")
with assertRaisesRegexp(ValueError, "Level must be None"):
ind.set_names("a", level=0)
# rename in place just leaves tuples and other containers alone
name = ('A', 'B')
ind = self.intIndex
ind.rename(name, inplace=True)
self.assertEqual(ind.name, name)
self.assertEqual(ind.names, [name])
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.strIndex).__name__):
hash(self.strIndex)
def test_new_axis(self):
new_index = self.dateIndex[None, :]
self.assertEqual(new_index.ndim, 2)
tm.assert_isinstance(new_index, np.ndarray)
def test_copy_and_deepcopy(self):
from copy import copy, deepcopy
for func in (copy, deepcopy):
idx_copy = func(self.strIndex)
self.assertIsNot(idx_copy, self.strIndex)
self.assertTrue(idx_copy.equals(self.strIndex))
new_copy = self.strIndex.copy(deep=True, name="banana")
self.assertEqual(new_copy.name, "banana")
new_copy2 = self.intIndex.copy(dtype=int)
self.assertEqual(new_copy2.dtype.kind, 'i')
def test_duplicates(self):
idx = Index([0, 0, 0])
self.assertFalse(idx.is_unique)
def test_sort(self):
self.assertRaises(TypeError, self.strIndex.sort)
def test_mutability(self):
self.assertRaises(TypeError, self.strIndex.__setitem__, 0, 'foo')
def test_constructor(self):
# regular instance creation
tm.assert_contains_all(self.strIndex, self.strIndex)
tm.assert_contains_all(self.dateIndex, self.dateIndex)
# casting
arr = np.array(self.strIndex)
index = Index(arr)
tm.assert_contains_all(arr, index)
self.assert_numpy_array_equal(self.strIndex, index)
# copy
arr = np.array(self.strIndex)
index = Index(arr, copy=True, name='name')
tm.assert_isinstance(index, Index)
self.assertEqual(index.name, 'name')
assert_array_equal(arr, index)
arr[0] = "SOMEBIGLONGSTRING"
self.assertNotEqual(index[0], "SOMEBIGLONGSTRING")
# what to do here?
# arr = np.array(5.)
# self.assertRaises(Exception, arr.view, Index)
def test_constructor_corner(self):
# corner case
self.assertRaises(TypeError, Index, 0)
def test_constructor_from_series(self):
expected = DatetimeIndex([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
s = Series([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
result = Index(s)
self.assertTrue(result.equals(expected))
result = DatetimeIndex(s)
self.assertTrue(result.equals(expected))
# GH 6273
# create from a series, passing a freq
s = Series(pd.to_datetime(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']))
result = DatetimeIndex(s, freq='MS')
expected = DatetimeIndex(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990'],freq='MS')
self.assertTrue(result.equals(expected))
df = pd.DataFrame(np.random.rand(5,3))
df['date'] = ['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']
result = DatetimeIndex(df['date'], freq='MS')
# GH 6274
# infer freq of same
result = pd.infer_freq(df['date'])
self.assertEqual(result,'MS')
def test_constructor_ndarray_like(self):
# GH 5460#issuecomment-44474502
# it should be possible to convert any object that satisfies the numpy
# ndarray interface directly into an Index
class ArrayLike(object):
def __init__(self, array):
self.array = array
def __array__(self, dtype=None):
return self.array
for array in [np.arange(5),
np.array(['a', 'b', 'c']),
date_range('2000-01-01', periods=3).values]:
expected = pd.Index(array)
result = pd.Index(ArrayLike(array))
self.assertTrue(result.equals(expected))
def test_index_ctor_infer_periodindex(self):
xp = period_range('2012-1-1', freq='M', periods=3)
rs = Index(xp)
assert_array_equal(rs, xp)
tm.assert_isinstance(rs, PeriodIndex)
def test_constructor_simple_new(self):
idx = Index([1, 2, 3, 4, 5], name='int')
result = idx._simple_new(idx, 'int')
self.assertTrue(result.equals(idx))
idx = Index([1.1, np.nan, 2.2, 3.0], name='float')
result = idx._simple_new(idx, 'float')
self.assertTrue(result.equals(idx))
idx = Index(['A', 'B', 'C', np.nan], name='obj')
result = idx._simple_new(idx, 'obj')
self.assertTrue(result.equals(idx))
def test_copy(self):
i = Index([], name='Foo')
i_copy = i.copy()
self.assertEqual(i_copy.name, 'Foo')
def test_view(self):
i = Index([], name='Foo')
i_view = i.view()
self.assertEqual(i_view.name, 'Foo')
def test_legacy_pickle_identity(self):
# GH 8431
pth = tm.get_data_path()
s1 = pd.read_pickle(os.path.join(pth,'s1-0.12.0.pickle'))
s2 = pd.read_pickle(os.path.join(pth,'s2-0.12.0.pickle'))
self.assertFalse(s1.index.identical(s2.index))
self.assertFalse(s1.index.equals(s2.index))
def test_astype(self):
casted = self.intIndex.astype('i8')
# it works!
casted.get_loc(5)
# pass on name
self.intIndex.name = 'foobar'
casted = self.intIndex.astype('i8')
self.assertEqual(casted.name, 'foobar')
def test_compat(self):
self.strIndex.tolist()
def test_equals(self):
# same
self.assertTrue(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'c'])))
# different length
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b'])))
# same length, different values
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'd'])))
# Must also be an Index
self.assertFalse(Index(['a', 'b', 'c']).equals(['a', 'b', 'c']))
def test_insert(self):
# GH 7256
# validate neg/pos inserts
result = Index(['b', 'c', 'd'])
#test 0th element
self.assertTrue(Index(['a', 'b', 'c', 'd']).equals(
result.insert(0, 'a')))
#test Nth element that follows Python list behavior
self.assertTrue(Index(['b', 'c', 'e', 'd']).equals(
result.insert(-1, 'e')))
#test loc +/- neq (0, -1)
self.assertTrue(result.insert(1, 'z').equals(
result.insert(-2, 'z')))
#test empty
null_index = Index([])
self.assertTrue(Index(['a']).equals(
null_index.insert(0, 'a')))
def test_delete(self):
idx = Index(['a', 'b', 'c', 'd'], name='idx')
expected = Index(['b', 'c', 'd'], name='idx')
result = idx.delete(0)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
expected = Index(['a', 'b', 'c'], name='idx')
result = idx.delete(-1)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
with tm.assertRaises((IndexError, ValueError)):
            # either, depending on numpy version
result = idx.delete(5)
def test_identical(self):
# index
i1 = Index(['a', 'b', 'c'])
i2 = Index(['a', 'b', 'c'])
self.assertTrue(i1.identical(i2))
i1 = i1.rename('foo')
self.assertTrue(i1.equals(i2))
self.assertFalse(i1.identical(i2))
i2 = i2.rename('foo')
self.assertTrue(i1.identical(i2))
i3 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')])
i4 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')], tupleize_cols=False)
self.assertFalse(i3.identical(i4))
def test_is_(self):
ind = Index(range(10))
self.assertTrue(ind.is_(ind))
self.assertTrue(ind.is_(ind.view().view().view().view()))
self.assertFalse(ind.is_(Index(range(10))))
self.assertFalse(ind.is_(ind.copy()))
self.assertFalse(ind.is_(ind.copy(deep=False)))
self.assertFalse(ind.is_(ind[:]))
self.assertFalse(ind.is_(ind.view(np.ndarray).view(Index)))
self.assertFalse(ind.is_(np.array(range(10))))
# quasi-implementation dependent
self.assertTrue(ind.is_(ind.view()))
ind2 = ind.view()
ind2.name = 'bob'
self.assertTrue(ind.is_(ind2))
self.assertTrue(ind2.is_(ind))
# doesn't matter if Indices are *actually* views of underlying data,
self.assertFalse(ind.is_(Index(ind.values)))
arr = np.array(range(1, 11))
ind1 = Index(arr, copy=False)
ind2 = Index(arr, copy=False)
self.assertFalse(ind1.is_(ind2))
def test_asof(self):
d = self.dateIndex[0]
self.assertIs(self.dateIndex.asof(d), d)
self.assertTrue(np.isnan(self.dateIndex.asof(d - timedelta(1))))
d = self.dateIndex[-1]
self.assertEqual(self.dateIndex.asof(d + timedelta(1)), d)
d = self.dateIndex[0].to_datetime()
tm.assert_isinstance(self.dateIndex.asof(d), Timestamp)
def test_asof_datetime_partial(self):
idx = pd.date_range('2010-01-01', periods=2, freq='m')
expected = Timestamp('2010-01-31')
result = idx.asof('2010-02')
self.assertEqual(result, expected)
def test_nanosecond_index_access(self):
s = Series([Timestamp('20130101')]).values.view('i8')[0]
r = DatetimeIndex([s + 50 + i for i in range(100)])
x = Series(np.random.randn(100), index=r)
first_value = x.asof(x.index[0])
# this does not yet work, as parsing strings is done via dateutil
#self.assertEqual(first_value, x['2013-01-01 00:00:00.000000050+0000'])
self.assertEqual(first_value, x[Timestamp(np.datetime64('2013-01-01 00:00:00.000000050+0000', 'ns'))])
def test_argsort(self):
result = self.strIndex.argsort()
expected = np.array(self.strIndex).argsort()
self.assert_numpy_array_equal(result, expected)
def test_comparators(self):
index = self.dateIndex
element = index[len(index) // 2]
element = _to_m8(element)
arr = np.array(index)
def _check(op):
arr_result = op(arr, element)
index_result = op(index, element)
self.assertIsInstance(index_result, np.ndarray)
self.assert_numpy_array_equal(arr_result, index_result)
_check(operator.eq)
_check(operator.ne)
_check(operator.gt)
_check(operator.lt)
_check(operator.ge)
_check(operator.le)
def test_booleanindex(self):
boolIdx = np.repeat(True, len(self.strIndex)).astype(bool)
boolIdx[5:30:2] = False
subIndex = self.strIndex[boolIdx]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
subIndex = self.strIndex[list(boolIdx)]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
def test_fancy(self):
sl = self.strIndex[[1, 2, 3]]
for i in sl:
self.assertEqual(i, sl[sl.get_loc(i)])
def test_empty_fancy(self):
empty_farr = np.array([], dtype=np.float_)
empty_iarr = np.array([], dtype=np.int_)
empty_barr = np.array([], dtype=np.bool_)
# pd.DatetimeIndex is excluded, because it overrides getitem and should
# be tested separately.
for idx in [self.strIndex, self.intIndex, self.floatIndex]:
empty_idx = idx.__class__([])
values = idx.values
self.assertTrue(idx[[]].identical(empty_idx))
self.assertTrue(idx[empty_iarr].identical(empty_idx))
self.assertTrue(idx[empty_barr].identical(empty_idx))
# np.ndarray only accepts ndarray of int & bool dtypes, so should
# Index.
self.assertRaises(IndexError, idx.__getitem__, empty_farr)
def test_getitem(self):
arr = np.array(self.dateIndex)
exp = self.dateIndex[5]
exp = _to_m8(exp)
self.assertEqual(exp, arr[5])
def test_shift(self):
shifted = self.dateIndex.shift(0, timedelta(1))
self.assertIs(shifted, self.dateIndex)
shifted = self.dateIndex.shift(5, timedelta(1))
self.assert_numpy_array_equal(shifted, self.dateIndex + timedelta(5))
shifted = self.dateIndex.shift(1, 'B')
self.assert_numpy_array_equal(shifted, self.dateIndex + offsets.BDay())
shifted.name = 'shifted'
self.assertEqual(shifted.name, shifted.shift(1, 'D').name)
def test_intersection(self):
first = self.strIndex[:20]
second = self.strIndex[:10]
intersect = first.intersection(second)
self.assertTrue(tm.equalContents(intersect, second))
# Corner cases
inter = first.intersection(first)
self.assertIs(inter, first)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.intersection, 0.5)
idx1 = Index([1, 2, 3, 4, 5], name='idx')
# if target has the same name, it is preserved
idx2 = Index([3, 4, 5, 6, 7], name='idx')
expected2 = Index([3, 4, 5], name='idx')
result2 = idx1.intersection(idx2)
self.assertTrue(result2.equals(expected2))
self.assertEqual(result2.name, expected2.name)
# if target name is different, it will be reset
idx3 = Index([3, 4, 5, 6, 7], name='other')
expected3 = Index([3, 4, 5], name=None)
result3 = idx1.intersection(idx3)
self.assertTrue(result3.equals(expected3))
self.assertEqual(result3.name, expected3.name)
# non monotonic
idx1 = Index([5, 3, 2, 4, 1], name='idx')
idx2 = Index([4, 7, 6, 5, 3], name='idx')
result2 = idx1.intersection(idx2)
self.assertTrue(tm.equalContents(result2, expected2))
self.assertEqual(result2.name, expected2.name)
idx3 = Index([4, 7, 6, 5, 3], name='other')
result3 = idx1.intersection(idx3)
self.assertTrue(tm.equalContents(result3, expected3))
self.assertEqual(result3.name, expected3.name)
# non-monotonic non-unique
idx1 = Index(['A','B','A','C'])
idx2 = Index(['B','D'])
expected = Index(['B'], dtype='object')
result = idx1.intersection(idx2)
self.assertTrue(result.equals(expected))
def test_union(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
everything = self.strIndex[:20]
union = first.union(second)
self.assertTrue(tm.equalContents(union, everything))
# Corner cases
union = first.union(first)
self.assertIs(union, first)
union = first.union([])
self.assertIs(union, first)
union = Index([]).union(first)
self.assertIs(union, first)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.union, 0.5)
# preserve names
first.name = 'A'
second.name = 'A'
union = first.union(second)
self.assertEqual(union.name, 'A')
second.name = 'B'
union = first.union(second)
self.assertIsNone(union.name)
def test_add(self):
# - API change GH 8226
with tm.assert_produces_warning():
self.strIndex + self.strIndex
firstCat = self.strIndex.union(self.dateIndex)
secondCat = self.strIndex.union(self.strIndex)
if self.dateIndex.dtype == np.object_:
appended = np.append(self.strIndex, self.dateIndex)
else:
appended = np.append(self.strIndex, self.dateIndex.astype('O'))
self.assertTrue(tm.equalContents(firstCat, appended))
self.assertTrue(tm.equalContents(secondCat, self.strIndex))
tm.assert_contains_all(self.strIndex, firstCat)
tm.assert_contains_all(self.strIndex, secondCat)
tm.assert_contains_all(self.dateIndex, firstCat)
def test_append_multiple(self):
index = Index(['a', 'b', 'c', 'd', 'e', 'f'])
foos = [index[:2], index[2:4], index[4:]]
result = foos[0].append(foos[1:])
self.assertTrue(result.equals(index))
# empty
result = index.append([])
self.assertTrue(result.equals(index))
def test_append_empty_preserve_name(self):
left = Index([], name='foo')
right = Index([1, 2, 3], name='foo')
result = left.append(right)
self.assertEqual(result.name, 'foo')
left = Index([], name='foo')
right = Index([1, 2, 3], name='bar')
result = left.append(right)
self.assertIsNone(result.name)
def test_add_string(self):
# from bug report
index = Index(['a', 'b', 'c'])
index2 = index + 'foo'
self.assertNotIn('a', index2)
self.assertIn('afoo', index2)
def test_iadd_string(self):
index = pd.Index(['a', 'b', 'c'])
# doesn't fail test unless there is a check before `+=`
self.assertIn('a', index)
index += '_x'
self.assertIn('a_x', index)
def test_difference(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
answer = self.strIndex[10:20]
first.name = 'name'
# different names
result = first.difference(second)
self.assertTrue(tm.equalContents(result, answer))
self.assertEqual(result.name, None)
# same names
second.name = 'name'
result = first.difference(second)
self.assertEqual(result.name, 'name')
# with empty
result = first.difference([])
self.assertTrue(tm.equalContents(result, first))
self.assertEqual(result.name, first.name)
        # with everything
result = first.difference(first)
self.assertEqual(len(result), 0)
self.assertEqual(result.name, first.name)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.diff, 0.5)
def test_symmetric_diff(self):
# smoke
idx1 = Index([1, 2, 3, 4], name='idx1')
idx2 = Index([2, 3, 4, 5])
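        # The symmetric difference keeps labels present in exactly one of the two indexes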
result = idx1.sym_diff(idx2)
expected = Index([1, 5])
self.assertTrue(tm.equalContents(result, expected))
self.assertIsNone(result.name)
# __xor__ syntax
expected = idx1 ^ idx2
self.assertTrue(tm.equalContents(result, expected))
self.assertIsNone(result.name)
# multiIndex
idx1 = MultiIndex.from_tuples(self.tuples)
idx2 = MultiIndex.from_tuples([('foo', 1), ('bar', 3)])
result = idx1.sym_diff(idx2)
expected = MultiIndex.from_tuples([('bar', 2), ('baz', 3), ('bar', 3)])
self.assertTrue(tm.equalContents(result, expected))
# nans:
# GH #6444, sorting of nans. Make sure the number of nans is right
# and the correct non-nan values are there. punt on sorting.
idx1 = Index([1, 2, 3, np.nan])
idx2 = Index([0, 1, np.nan])
result = idx1.sym_diff(idx2)
# expected = Index([0.0, np.nan, 2.0, 3.0, np.nan])
nans = pd.isnull(result)
self.assertEqual(nans.sum(), 2)
self.assertEqual((~nans).sum(), 3)
[self.assertIn(x, result) for x in [0.0, 2.0, 3.0]]
# other not an Index:
idx1 = Index([1, 2, 3, 4], name='idx1')
idx2 = np.array([2, 3, 4, 5])
expected = Index([1, 5])
result = idx1.sym_diff(idx2)
self.assertTrue(tm.equalContents(result, expected))
self.assertEqual(result.name, 'idx1')
result = idx1.sym_diff(idx2, result_name='new_name')
self.assertTrue(tm.equalContents(result, expected))
self.assertEqual(result.name, 'new_name')
# other isn't iterable
with tm.assertRaises(TypeError):
Index(idx1,dtype='object') - 1
def test_pickle(self):
self.verify_pickle(self.strIndex)
self.strIndex.name = 'foo'
self.verify_pickle(self.strIndex)
self.verify_pickle(self.dateIndex)
def test_is_numeric(self):
self.assertFalse(self.dateIndex.is_numeric())
self.assertFalse(self.strIndex.is_numeric())
self.assertTrue(self.intIndex.is_numeric())
self.assertTrue(self.floatIndex.is_numeric())
def test_is_object(self):
self.assertTrue(self.strIndex.is_object())
self.assertTrue(self.boolIndex.is_object())
self.assertFalse(self.intIndex.is_object())
self.assertFalse(self.dateIndex.is_object())
self.assertFalse(self.floatIndex.is_object())
def test_is_all_dates(self):
self.assertTrue(self.dateIndex.is_all_dates)
self.assertFalse(self.strIndex.is_all_dates)
self.assertFalse(self.intIndex.is_all_dates)
def test_summary(self):
self._check_method_works(Index.summary)
# GH3869
ind = Index(['{other}%s', "~:{range}:0"], name='A')
result = ind.summary()
# shouldn't be formatted accidentally.
self.assertIn('~:{range}:0', result)
self.assertIn('{other}%s', result)
def test_format(self):
self._check_method_works(Index.format)
index = Index([datetime.now()])
formatted = index.format()
expected = [str(index[0])]
self.assertEqual(formatted, expected)
# 2845
index = Index([1, 2.0+3.0j, np.nan])
formatted = index.format()
expected = [str(index[0]), str(index[1]), u('NaN')]
self.assertEqual(formatted, expected)
# is this really allowed?
index = Index([1, 2.0+3.0j, None])
formatted = index.format()
expected = [str(index[0]), str(index[1]), u('NaN')]
self.assertEqual(formatted, expected)
self.strIndex[:0].format()
def test_format_with_name_time_info(self):
# bug I fixed 12/20/2011
inc = timedelta(hours=4)
dates = Index([dt + inc for dt in self.dateIndex], name='something')
formatted = dates.format(name=True)
self.assertEqual(formatted[0], 'something')
def test_format_datetime_with_time(self):
t = Index([datetime(2012, 2, 7), datetime(2012, 2, 7, 23)])
result = t.format()
expected = ['2012-02-07 00:00:00', '2012-02-07 23:00:00']
self.assertEqual(len(result), 2)
self.assertEqual(result, expected)
def test_format_none(self):
values = ['a', 'b', 'c', None]
idx = Index(values)
idx.format()
self.assertIsNone(idx[3])
def test_take(self):
indexer = [4, 3, 0, 2]
result = self.dateIndex.take(indexer)
expected = self.dateIndex[indexer]
self.assertTrue(result.equals(expected))
def _check_method_works(self, method):
method(self.empty)
method(self.dateIndex)
method(self.unicodeIndex)
method(self.strIndex)
method(self.intIndex)
method(self.tuples)
def test_get_indexer(self):
idx1 = Index([1, 2, 3, 4, 5])
idx2 = Index([2, 4, 6])
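        # -1 marks target labels with no match; 'ffill'/'bfill' alias 'pad'/'backfill'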
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, [1, 3, -1])
r1 = idx2.get_indexer(idx1, method='pad')
assert_almost_equal(r1, [-1, 0, 0, 1, 1])
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method='backfill')
assert_almost_equal(r1, [0, 0, 1, 1, 2])
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
def test_slice_locs(self):
for dtype in [int, float]:
idx = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=dtype))
n = len(idx)
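            # slice_locs maps label bounds to (start, stop) positions; bounds absent from
            # the index resolve so the slice covers every value inside the label range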
self.assertEqual(idx.slice_locs(start=2), (2, n))
self.assertEqual(idx.slice_locs(start=3), (3, n))
self.assertEqual(idx.slice_locs(3, 8), (3, 6))
self.assertEqual(idx.slice_locs(5, 10), (3, n))
self.assertEqual(idx.slice_locs(5.0, 10.0), (3, n))
self.assertEqual(idx.slice_locs(4.5, 10.5), (3, 8))
self.assertEqual(idx.slice_locs(end=8), (0, 6))
self.assertEqual(idx.slice_locs(end=9), (0, 7))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs(8, 2), (2, 6))
self.assertEqual(idx2.slice_locs(8.5, 1.5), (2, 6))
self.assertEqual(idx2.slice_locs(7, 3), (2, 5))
self.assertEqual(idx2.slice_locs(10.5, -1), (0, n))
def test_slice_locs_dup(self):
idx = Index(['a', 'a', 'b', 'c', 'd', 'd'])
self.assertEqual(idx.slice_locs('a', 'd'), (0, 6))
self.assertEqual(idx.slice_locs(end='d'), (0, 6))
self.assertEqual(idx.slice_locs('a', 'c'), (0, 4))
self.assertEqual(idx.slice_locs('b', 'd'), (2, 6))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs('d', 'a'), (0, 6))
self.assertEqual(idx2.slice_locs(end='a'), (0, 6))
self.assertEqual(idx2.slice_locs('d', 'b'), (0, 4))
self.assertEqual(idx2.slice_locs('c', 'a'), (2, 6))
for dtype in [int, float]:
idx = Index(np.array([10, 12, 12, 14], dtype=dtype))
self.assertEqual(idx.slice_locs(12, 12), (1, 3))
self.assertEqual(idx.slice_locs(11, 13), (1, 3))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs(12, 12), (1, 3))
self.assertEqual(idx2.slice_locs(13, 11), (1, 3))
def test_slice_locs_na(self):
idx = Index([np.nan, 1, 2])
self.assertRaises(KeyError, idx.slice_locs, start=1.5)
self.assertRaises(KeyError, idx.slice_locs, end=1.5)
self.assertEqual(idx.slice_locs(1), (1, 3))
self.assertEqual(idx.slice_locs(np.nan), (0, 3))
idx = Index([np.nan, np.nan, 1, 2])
self.assertRaises(KeyError, idx.slice_locs, np.nan)
def test_drop(self):
n = len(self.strIndex)
dropped = self.strIndex.drop(self.strIndex[lrange(5, 10)])
expected = self.strIndex[lrange(5) + lrange(10, n)]
self.assertTrue(dropped.equals(expected))
self.assertRaises(ValueError, self.strIndex.drop, ['foo', 'bar'])
dropped = self.strIndex.drop(self.strIndex[0])
expected = self.strIndex[1:]
self.assertTrue(dropped.equals(expected))
ser = Index([1, 2, 3])
dropped = ser.drop(1)
expected = Index([2, 3])
self.assertTrue(dropped.equals(expected))
def test_tuple_union_bug(self):
import pandas
import numpy as np
aidx1 = np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B')],
dtype=[('num', int), ('let', 'a1')])
aidx2 = np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B'), (1, 'C'), (2,
'C')], dtype=[('num', int), ('let', 'a1')])
idx1 = pandas.Index(aidx1)
idx2 = pandas.Index(aidx2)
# intersection broken?
int_idx = idx1.intersection(idx2)
# needs to be 1d like idx1 and idx2
expected = idx1[:4] # pandas.Index(sorted(set(idx1) & set(idx2)))
self.assertEqual(int_idx.ndim, 1)
self.assertTrue(int_idx.equals(expected))
# union broken
union_idx = idx1.union(idx2)
expected = idx2
self.assertEqual(union_idx.ndim, 1)
self.assertTrue(union_idx.equals(expected))
def test_is_monotonic_incomparable(self):
index = Index([5, datetime.now(), 7])
self.assertFalse(index.is_monotonic)
self.assertFalse(index.is_monotonic_decreasing)
def test_get_set_value(self):
values = np.random.randn(100)
date = self.dateIndex[67]
assert_almost_equal(self.dateIndex.get_value(values, date),
values[67])
self.dateIndex.set_value(values, date, 10)
self.assertEqual(values[67], 10)
def test_isin(self):
values = ['foo', 'bar', 'quux']
idx = Index(['qux', 'baz', 'foo', 'bar'])
result = idx.isin(values)
expected = np.array([False, False, True, True])
self.assert_numpy_array_equal(result, expected)
# empty, return dtype bool
idx = Index([])
result = idx.isin(values)
self.assertEqual(len(result), 0)
self.assertEqual(result.dtype, np.bool_)
def test_isin_nan(self):
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([np.nan]), [False, True])
self.assert_numpy_array_equal(
Index(['a', pd.NaT]).isin([pd.NaT]), [False, True])
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([float('nan')]), [False, False])
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([pd.NaT]), [False, False])
# Float64Index overrides isin, so must be checked separately
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([np.nan]), [False, True])
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([float('nan')]), [False, True])
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([pd.NaT]), [False, True])
def test_isin_level_kwarg(self):
def check_idx(idx):
values = idx.tolist()[-2:] + ['nonexisting']
expected = np.array([False, False, True, True])
self.assert_numpy_array_equal(expected, idx.isin(values, level=0))
self.assert_numpy_array_equal(expected, idx.isin(values, level=-1))
self.assertRaises(IndexError, idx.isin, values, level=1)
self.assertRaises(IndexError, idx.isin, values, level=10)
self.assertRaises(IndexError, idx.isin, values, level=-2)
self.assertRaises(KeyError, idx.isin, values, level=1.0)
self.assertRaises(KeyError, idx.isin, values, level='foobar')
idx.name = 'foobar'
self.assert_numpy_array_equal(expected,
idx.isin(values, level='foobar'))
self.assertRaises(KeyError, idx.isin, values, level='xyzzy')
self.assertRaises(KeyError, idx.isin, values, level=np.nan)
check_idx(Index(['qux', 'baz', 'foo', 'bar']))
# Float64Index overrides isin, so must be checked separately
check_idx(Float64Index([1.0, 2.0, 3.0, 4.0]))
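# Illustrative aside, not part of the original test suite: on a flat Index the
# level argument of isin may be 0, -1, or the index's name, as checked above.
import pandas as pd

idx = pd.Index(['qux', 'baz', 'foo', 'bar'], name='letters')
print(idx.isin(['foo', 'bar'], level=0))          # [False False  True  True]
print(idx.isin(['foo', 'bar'], level='letters'))  # same result, level given by name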
def test_boolean_cmp(self):
values = [1, 2, 3, 4]
idx = Index(values)
res = (idx == values)
self.assert_numpy_array_equal(res,np.array([True,True,True,True],dtype=bool))
def test_get_level_values(self):
result = self.strIndex.get_level_values(0)
self.assertTrue(result.equals(self.strIndex))
def test_slice_keep_name(self):
idx = Index(['a', 'b'], name='asdf')
self.assertEqual(idx.name, idx[1:].name)
def test_join_self(self):
# instance attributes of the form self.<name>Index
indices = 'unicode', 'str', 'date', 'int', 'float'
kinds = 'outer', 'inner', 'left', 'right'
for index_kind in indices:
res = getattr(self, '{0}Index'.format(index_kind))
for kind in kinds:
joined = res.join(res, how=kind)
self.assertIs(res, joined)
def test_indexing_doesnt_change_class(self):
idx = Index([1, 2, 3, 'a', 'b', 'c'])
self.assertTrue(idx[1:3].identical(
pd.Index([2, 3], dtype=np.object_)))
self.assertTrue(idx[[0,1]].identical(
pd.Index([1, 2], dtype=np.object_)))
def test_outer_join_sort(self):
left_idx = Index(np.random.permutation(15))
right_idx = tm.makeDateIndex(10)
with tm.assert_produces_warning(RuntimeWarning):
joined = left_idx.join(right_idx, how='outer')
# right_idx in this case because DatetimeIndex has join precedence over
# Int64Index
expected = right_idx.astype(object).union(left_idx.astype(object))
tm.assert_index_equal(joined, expected)
def test_nan_first_take_datetime(self):
idx = Index([pd.NaT, Timestamp('20130101'), Timestamp('20130102')])
res = idx.take([-1, 0, 1])
exp = Index([idx[-1], idx[0], idx[1]])
tm.assert_index_equal(res, exp)
def test_reindex_preserves_name_if_target_is_list_or_ndarray(self):
# GH6552
idx = pd.Index([0, 1, 2])
dt_idx = pd.date_range('20130101', periods=3)
idx.name = None
self.assertEqual(idx.reindex([])[0].name, None)
self.assertEqual(idx.reindex(np.array([]))[0].name, None)
self.assertEqual(idx.reindex(idx.tolist())[0].name, None)
self.assertEqual(idx.reindex(idx.tolist()[:-1])[0].name, None)
self.assertEqual(idx.reindex(idx.values)[0].name, None)
self.assertEqual(idx.reindex(idx.values[:-1])[0].name, None)
# Must preserve name even if dtype changes.
self.assertEqual(idx.reindex(dt_idx.values)[0].name, None)
self.assertEqual(idx.reindex(dt_idx.tolist())[0].name, None)
idx.name = 'foobar'
self.assertEqual(idx.reindex([])[0].name, 'foobar')
self.assertEqual(idx.reindex(np.array([]))[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.tolist())[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.tolist()[:-1])[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.values)[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.values[:-1])[0].name, 'foobar')
# Must preserve name even if dtype changes.
self.assertEqual(idx.reindex(dt_idx.values)[0].name, 'foobar')
self.assertEqual(idx.reindex(dt_idx.tolist())[0].name, 'foobar')
def test_reindex_preserves_type_if_target_is_empty_list_or_array(self):
# GH7774
idx = pd.Index(list('abc'))
def get_reindex_type(target):
return idx.reindex(target)[0].dtype.type
self.assertEqual(get_reindex_type([]), np.object_)
self.assertEqual(get_reindex_type(np.array([])), np.object_)
self.assertEqual(get_reindex_type(np.array([], dtype=np.int64)),
np.object_)
def test_reindex_doesnt_preserve_type_if_target_is_empty_index(self):
# GH7774
idx = pd.Index(list('abc'))
def get_reindex_type(target):
return idx.reindex(target)[0].dtype.type
self.assertEqual(get_reindex_type(pd.Int64Index([])), np.int64)
self.assertEqual(get_reindex_type(pd.Float64Index([])), np.float64)
self.assertEqual(get_reindex_type(pd.DatetimeIndex([])), np.datetime64)
reindexed = idx.reindex(pd.MultiIndex([pd.Int64Index([]),
pd.Float64Index([])],
[[], []]))[0]
self.assertEqual(reindexed.levels[0].dtype.type, np.int64)
self.assertEqual(reindexed.levels[1].dtype.type, np.float64)
class Numeric(Base):
def test_numeric_compat(self):
idx = self._holder(np.arange(5,dtype='int64'))
didx = self._holder(np.arange(5,dtype='int64')**2)
result = idx * 1
tm.assert_index_equal(result, idx)
result = 1 * idx
tm.assert_index_equal(result, idx)
result = idx * idx
tm.assert_index_equal(result, didx)
result = idx / 1
tm.assert_index_equal(result, idx)
result = idx // 1
tm.assert_index_equal(result, idx)
result = idx * np.array(5,dtype='int64')
tm.assert_index_equal(result, self._holder(np.arange(5,dtype='int64')*5))
result = idx * np.arange(5,dtype='int64')
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='int64'))
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='float64')+0.1)
tm.assert_index_equal(result,
Float64Index(np.arange(5,dtype='float64')*(np.arange(5,dtype='float64')+0.1)))
# invalid
self.assertRaises(TypeError, lambda : idx * date_range('20130101',periods=5))
self.assertRaises(ValueError, lambda : idx * self._holder(np.arange(3)))
self.assertRaises(ValueError, lambda : idx * np.array([1,2]))
def test_explicit_conversions(self):
# GH 8608
# add/sub are overridden explicitly for Float/Int Index
idx = self._holder(np.arange(5,dtype='int64'))
# float conversions
arr = np.arange(5,dtype='int64')*3.2
expected = Float64Index(arr)
fidx = idx * 3.2
tm.assert_index_equal(fidx,expected)
fidx = 3.2 * idx
tm.assert_index_equal(fidx,expected)
# interops with numpy arrays
expected = Float64Index(arr)
a = np.zeros(5,dtype='float64')
result = fidx - a
tm.assert_index_equal(result,expected)
expected = Float64Index(-arr)
a = np.zeros(5,dtype='float64')
result = a - fidx
tm.assert_index_equal(result,expected)
def test_ufunc_compat(self):
idx = self._holder(np.arange(5,dtype='int64'))
result = np.sin(idx)
expected = Float64Index(np.sin(np.arange(5,dtype='int64')))
tm.assert_index_equal(result, expected)
class TestFloat64Index(Numeric, tm.TestCase):
_holder = Float64Index
_multiprocess_can_split_ = True
def setUp(self):
self.mixed = Float64Index([1.5, 2, 3, 4, 5])
self.float = Float64Index(np.arange(5) * 2.5)
def create_index(self):
return Float64Index(np.arange(5,dtype='float64'))
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.float).__name__):
hash(self.float)
def test_repr_roundtrip(self):
for ind in (self.mixed, self.float):
tm.assert_index_equal(eval(repr(ind)), ind)
def check_is_index(self, i):
self.assertIsInstance(i, Index)
self.assertNotIsInstance(i, Float64Index)
def check_coerce(self, a, b, is_float_index=True):
self.assertTrue(a.equals(b))
if is_float_index:
self.assertIsInstance(b, Float64Index)
else:
self.check_is_index(b)
def test_constructor(self):
# explicit construction
index = Float64Index([1,2,3,4,5])
self.assertIsInstance(index, Float64Index)
self.assertTrue((index.values == np.array([1,2,3,4,5],dtype='float64')).all())
index = Float64Index(np.array([1,2,3,4,5]))
self.assertIsInstance(index, Float64Index)
index = Float64Index([1.,2,3,4,5])
self.assertIsInstance(index, Float64Index)
index = Float64Index(np.array([1.,2,3,4,5]))
self.assertIsInstance(index, Float64Index)
self.assertEqual(index.dtype, float)
index = Float64Index(np.array([1.,2,3,4,5]),dtype=np.float32)
self.assertIsInstance(index, Float64Index)
self.assertEqual(index.dtype, np.float64)
index = Float64Index(np.array([1,2,3,4,5]),dtype=np.float32)
self.assertIsInstance(index, Float64Index)
self.assertEqual(index.dtype, np.float64)
# nan handling
result = Float64Index([np.nan, np.nan])
self.assertTrue(pd.isnull(result.values).all())
result = Float64Index(np.array([np.nan]))
self.assertTrue(pd.isnull(result.values).all())
result = Index(np.array([np.nan]))
self.assertTrue(pd.isnull(result.values).all())
def test_constructor_invalid(self):
# invalid
self.assertRaises(TypeError, Float64Index, 0.)
self.assertRaises(TypeError, Float64Index, ['a','b',0.])
self.assertRaises(TypeError, Float64Index, [Timestamp('20130101')])
def test_constructor_coerce(self):
self.check_coerce(self.mixed,Index([1.5, 2, 3, 4, 5]))
self.check_coerce(self.float,Index(np.arange(5) * 2.5))
self.check_coerce(self.float,Index(np.array(np.arange(5) * 2.5, dtype=object)))
def test_constructor_explicit(self):
# these don't auto convert
self.check_coerce(self.float,Index((np.arange(5) * 2.5), dtype=object),
is_float_index=False)
self.check_coerce(self.mixed,Index([1.5, 2, 3, 4, 5],dtype=object),
is_float_index=False)
def test_astype(self):
result = self.float.astype(object)
self.assertTrue(result.equals(self.float))
self.assertTrue(self.float.equals(result))
self.check_is_index(result)
i = self.mixed.copy()
i.name = 'foo'
result = i.astype(object)
self.assertTrue(result.equals(i))
self.assertTrue(i.equals(result))
self.check_is_index(result)
def test_equals(self):
i = Float64Index([1.0,2.0])
self.assertTrue(i.equals(i))
self.assertTrue(i.identical(i))
i2 = Float64Index([1.0,2.0])
self.assertTrue(i.equals(i2))
i = Float64Index([1.0,np.nan])
self.assertTrue(i.equals(i))
self.assertTrue(i.identical(i))
i2 = Float64Index([1.0,np.nan])
self.assertTrue(i.equals(i2))
def test_get_loc_na(self):
idx = Float64Index([np.nan, 1, 2])
self.assertEqual(idx.get_loc(1), 1)
self.assertEqual(idx.get_loc(np.nan), 0)
idx = Float64Index([np.nan, 1, np.nan])
self.assertEqual(idx.get_loc(1), 1)
self.assertRaises(KeyError, idx.slice_locs, np.nan)
def test_contains_nans(self):
i = Float64Index([1.0, 2.0, np.nan])
self.assertTrue(np.nan in i)
def test_contains_not_nans(self):
i = Float64Index([1.0, 2.0, np.nan])
self.assertTrue(1.0 in i)
def test_doesnt_contain_all_the_things(self):
i = Float64Index([np.nan])
self.assertFalse(i.isin([0]).item())
self.assertFalse(i.isin([1]).item())
self.assertTrue(i.isin([np.nan]).item())
def test_nan_multiple_containment(self):
i = Float64Index([1.0, np.nan])
np.testing.assert_array_equal(i.isin([1.0]), np.array([True, False]))
np.testing.assert_array_equal(i.isin([2.0, np.pi]),
np.array([False, False]))
np.testing.assert_array_equal(i.isin([np.nan]),
np.array([False, True]))
np.testing.assert_array_equal(i.isin([1.0, np.nan]),
np.array([True, True]))
i = Float64Index([1.0, 2.0])
np.testing.assert_array_equal(i.isin([np.nan]),
np.array([False, False]))
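# Illustrative aside, not part of the original test suite: on a float64 index,
# isin treats np.nan and float('nan') as the same missing value, which is what
# the assertions above rely on (a sketch assuming current NaN-matching semantics).
import numpy as np
import pandas as pd

i = pd.Index([1.0, np.nan])
print(i.isin([np.nan]))        # [False  True]
print(i.isin([float('nan')]))  # [False  True]
print(i.isin([2.0, np.pi]))    # [False False]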
def test_astype_from_object(self):
index = Index([1.0, np.nan, 0.2], dtype='object')
result = index.astype(float)
expected = Float64Index([1.0, np.nan, 0.2])
tm.assert_equal(result.dtype, expected.dtype)
tm.assert_index_equal(result, expected)
class TestInt64Index(Numeric, tm.TestCase):
_holder = Int64Index
_multiprocess_can_split_ = True
def setUp(self):
self.index = Int64Index(np.arange(0, 20, 2))
def create_index(self):
return Int64Index(np.arange(5,dtype='int64'))
def test_too_many_names(self):
def testit():
self.index.names = ["roger", "harold"]
assertRaisesRegexp(ValueError, "^Length", testit)
def test_constructor(self):
# pass list, coerce fine
index = Int64Index([-5, 0, 1, 2])
expected = np.array([-5, 0, 1, 2], dtype=np.int64)
self.assert_numpy_array_equal(index, expected)
# from iterable
index = Int64Index(iter([-5, 0, 1, 2]))
self.assert_numpy_array_equal(index, expected)
# scalar raise Exception
self.assertRaises(TypeError, Int64Index, 5)
# copy
arr = self.index.values
new_index = Int64Index(arr, copy=True)
self.assert_numpy_array_equal(new_index, self.index)
val = arr[0] + 3000
# this should not change index
arr[0] = val
self.assertNotEqual(new_index[0], val)
def test_constructor_corner(self):
arr = np.array([1, 2, 3, 4], dtype=object)
index = Int64Index(arr)
self.assertEqual(index.values.dtype, np.int64)
self.assertTrue(index.equals(arr))
# preventing casting
arr = np.array([1, '2', 3, '4'], dtype=object)
with tm.assertRaisesRegexp(TypeError, 'casting'):
Int64Index(arr)
arr_with_floats = [0, 2, 3, 4, 5, 1.25, 3, -1]
with tm.assertRaisesRegexp(TypeError, 'casting'):
Int64Index(arr_with_floats)
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.index).__name__):
hash(self.index)
def test_copy(self):
i = Int64Index([], name='Foo')
i_copy = i.copy()
self.assertEqual(i_copy.name, 'Foo')
def test_view(self):
i = Int64Index([], name='Foo')
i_view = i.view()
self.assertEqual(i_view.name, 'Foo')
def test_coerce_list(self):
# coerce things
arr = Index([1, 2, 3, 4])
tm.assert_isinstance(arr, Int64Index)
# but not if explicit dtype passed
arr = Index([1, 2, 3, 4], dtype=object)
tm.assert_isinstance(arr, Index)
def test_dtype(self):
self.assertEqual(self.index.dtype, np.int64)
def test_is_monotonic(self):
self.assertTrue(self.index.is_monotonic)
self.assertTrue(self.index.is_monotonic_increasing)
self.assertFalse(self.index.is_monotonic_decreasing)
index = Int64Index([4, 3, 2, 1])
self.assertFalse(index.is_monotonic)
self.assertTrue(index.is_monotonic_decreasing)
index = Int64Index([1])
self.assertTrue(index.is_monotonic)
self.assertTrue(index.is_monotonic_increasing)
self.assertTrue(index.is_monotonic_decreasing)
def test_is_monotonic_na(self):
examples = [Index([np.nan]),
Index([np.nan, 1]),
Index([1, 2, np.nan]),
Index(['a', 'b', np.nan]),
pd.to_datetime(['NaT']),
pd.to_datetime(['NaT', '2000-01-01']),
pd.to_datetime(['2000-01-01', 'NaT', '2000-01-02']),
pd.to_timedelta(['1 day', 'NaT']),
]
for index in examples:
self.assertFalse(index.is_monotonic_increasing)
self.assertFalse(index.is_monotonic_decreasing)
def test_equals(self):
same_values = Index(self.index, dtype=object)
self.assertTrue(self.index.equals(same_values))
self.assertTrue(same_values.equals(self.index))
def test_identical(self):
i = Index(self.index.copy())
self.assertTrue(i.identical(self.index))
same_values_different_type = Index(i, dtype=object)
self.assertFalse(i.identical(same_values_different_type))
i = self.index.copy(dtype=object)
i = i.rename('foo')
same_values = Index(i, dtype=object)
self.assertTrue(same_values.identical(self.index.copy(dtype=object)))
self.assertFalse(i.identical(self.index))
self.assertTrue(Index(same_values, name='foo', dtype=object
).identical(i))
self.assertFalse(
self.index.copy(dtype=object)
.identical(self.index.copy(dtype='int64')))
def test_get_indexer(self):
target = Int64Index(np.arange(10))
indexer = self.index.get_indexer(target)
expected = np.array([0, -1, 1, -1, 2, -1, 3, -1, 4, -1])
self.assert_numpy_array_equal(indexer, expected)
def test_get_indexer_pad(self):
target = Int64Index(np.arange(10))
indexer = self.index.get_indexer(target, method='pad')
expected = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4])
self.assert_numpy_array_equal(indexer, expected)
def test_get_indexer_backfill(self):
target = Int64Index(np.arange(10))
indexer = self.index.get_indexer(target, method='backfill')
expected = np.array([0, 1, 1, 2, 2, 3, 3, 4, 4, 5])
self.assert_numpy_array_equal(indexer, expected)
def test_join_outer(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
# guarantee of sortedness
res, lidx, ridx = self.index.join(other, how='outer',
return_indexers=True)
noidx_res = self.index.join(other, how='outer')
self.assertTrue(res.equals(noidx_res))
eres = Int64Index([0, 1, 2, 4, 5, 6, 7, 8, 10, 12, 14, 16, 18, 25])
elidx = np.array([0, -1, 1, 2, -1, 3, -1, 4, 5, 6, 7, 8, 9, -1],
dtype=np.int64)
eridx = np.array([-1, 3, 4, -1, 5, -1, 0, -1, -1, 1, -1, -1, -1, 2],
dtype=np.int64)
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='outer',
return_indexers=True)
noidx_res = self.index.join(other_mono, how='outer')
self.assertTrue(res.equals(noidx_res))
eridx = np.array([-1, 0, 1, -1, 2, -1, 3, -1, -1, 4, -1, -1, -1, 5],
dtype=np.int64)
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
def test_join_inner(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
res, lidx, ridx = self.index.join(other, how='inner',
return_indexers=True)
# no guarantee of sortedness, so sort for comparison purposes
ind = res.argsort()
res = res.take(ind)
lidx = lidx.take(ind)
ridx = ridx.take(ind)
eres = Int64Index([2, 12])
elidx = np.array([1, 6])
eridx = np.array([4, 1])
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='inner',
return_indexers=True)
res2 = self.index.intersection(other_mono)
self.assertTrue(res.equals(res2))
eridx = np.array([1, 4])
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
def test_join_left(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
res, lidx, ridx = self.index.join(other, how='left',
return_indexers=True)
eres = self.index
eridx = np.array([-1, 4, -1, -1, -1, -1, 1, -1, -1, -1],
dtype=np.int64)
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assertIsNone(lidx)
self.assert_numpy_array_equal(ridx, eridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='left',
return_indexers=True)
eridx = np.array([-1, 1, -1, -1, -1, -1, 4, -1, -1, -1],
dtype=np.int64)
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assertIsNone(lidx)
self.assert_numpy_array_equal(ridx, eridx)
# non-unique
"""
idx = Index([1,1,2,5])
idx2 = Index([1,2,5,7,9])
res, lidx, ridx = idx2.join(idx, how='left', return_indexers=True)
eres = idx2
eridx = np.array([0, 2, 3, -1, -1])
elidx = np.array([0, 1, 2, 3, 4])
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
"""
def test_join_right(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
res, lidx, ridx = self.index.join(other, how='right',
return_indexers=True)
eres = other
elidx = np.array([-1, 6, -1, -1, 1, -1],
dtype=np.int64)
tm.assert_isinstance(other, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assertIsNone(ridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='right',
return_indexers=True)
eres = other_mono
elidx = np.array([-1, 1, -1, -1, 6, -1],
dtype=np.int64)
tm.assert_isinstance(other, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assertIsNone(ridx)
# non-unique
"""
idx = Index([1,1,2,5])
idx2 = Index([1,2,5,7,9])
res, lidx, ridx = idx.join(idx2, how='right', return_indexers=True)
eres = idx2
elidx = np.array([0, 2, 3, -1, -1])
eridx = np.array([0, 1, 2, 3, 4])
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
idx = Index([1,1,2,5])
idx2 = Index([1,2,5,9,7])
res = idx.join(idx2, how='right', return_indexers=False)
eres = idx2
self.assertTrue(res.equals(eres))
"""
def test_join_non_int_index(self):
other = Index([3, 6, 7, 8, 10], dtype=object)
outer = self.index.join(other, how='outer')
outer2 = other.join(self.index, how='outer')
expected = Index([0, 2, 3, 4, 6, 7, 8, 10, 12, 14,
16, 18], dtype=object)
self.assertTrue(outer.equals(outer2))
self.assertTrue(outer.equals(expected))
inner = self.index.join(other, how='inner')
inner2 = other.join(self.index, how='inner')
expected = Index([6, 8, 10], dtype=object)
self.assertTrue(inner.equals(inner2))
self.assertTrue(inner.equals(expected))
left = self.index.join(other, how='left')
self.assertTrue(left.equals(self.index))
left2 = other.join(self.index, how='left')
self.assertTrue(left2.equals(other))
right = self.index.join(other, how='right')
self.assertTrue(right.equals(other))
right2 = other.join(self.index, how='right')
self.assertTrue(right2.equals(self.index))
def test_join_non_unique(self):
left = Index([4, 4, 3, 3])
joined, lidx, ridx = left.join(left, return_indexers=True)
exp_joined = Index([3, 3, 3, 3, 4, 4, 4, 4])
self.assertTrue(joined.equals(exp_joined))
exp_lidx = np.array([2, 2, 3, 3, 0, 0, 1, 1], dtype=np.int64)
self.assert_numpy_array_equal(lidx, exp_lidx)
exp_ridx = np.array([2, 3, 2, 3, 0, 1, 0, 1], dtype=np.int64)
self.assert_numpy_array_equal(ridx, exp_ridx)
def test_join_self(self):
kinds = 'outer', 'inner', 'left', 'right'
for kind in kinds:
joined = self.index.join(self.index, how=kind)
self.assertIs(self.index, joined)
def test_intersection(self):
other = Index([1, 2, 3, 4, 5])
result = self.index.intersection(other)
expected = np.sort(np.intersect1d(self.index.values, other.values))
self.assert_numpy_array_equal(result, expected)
result = other.intersection(self.index)
expected = np.sort(np.asarray(np.intersect1d(self.index.values,
other.values)))
self.assert_numpy_array_equal(result, expected)
def test_intersect_str_dates(self):
dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]
i1 = Index(dt_dates, dtype=object)
i2 = Index(['aa'], dtype=object)
res = i2.intersection(i1)
self.assertEqual(len(res), 0)
def test_union_noncomparable(self):
from datetime import datetime, timedelta
# corner case, non-Int64Index
now = datetime.now()
other = Index([now + timedelta(i) for i in range(4)], dtype=object)
result = self.index.union(other)
expected = np.concatenate((self.index, other))
self.assert_numpy_array_equal(result, expected)
result = other.union(self.index)
expected = np.concatenate((other, self.index))
self.assert_numpy_array_equal(result, expected)
def test_cant_or_shouldnt_cast(self):
# can't
data = ['foo', 'bar', 'baz']
self.assertRaises(TypeError, Int64Index, data)
# shouldn't
data = ['0', '1', '2']
self.assertRaises(TypeError, Int64Index, data)
def test_view_Index(self):
self.index.view(Index)
def test_prevent_casting(self):
result = self.index.astype('O')
self.assertEqual(result.dtype, np.object_)
def test_take_preserve_name(self):
index = Int64Index([1, 2, 3, 4], name='foo')
taken = index.take([3, 0, 1])
self.assertEqual(index.name, taken.name)
def test_int_name_format(self):
from pandas import Series, DataFrame
index = Index(['a', 'b', 'c'], name=0)
s = Series(lrange(3), index)
df = DataFrame(lrange(3), index=index)
repr(s)
repr(df)
def test_print_unicode_columns(self):
df = pd.DataFrame(
{u("\u05d0"): [1, 2, 3], "\u05d1": [4, 5, 6], "c": [7, 8, 9]})
repr(df.columns) # should not raise UnicodeDecodeError
def test_repr_summary(self):
with cf.option_context('display.max_seq_items', 10):
r = repr(pd.Index(np.arange(1000)))
self.assertTrue(len(r) < 100)
self.assertTrue("..." in r)
def test_repr_roundtrip(self):
tm.assert_index_equal(eval(repr(self.index)), self.index)
def test_unicode_string_with_unicode(self):
idx = Index(lrange(1000))
if compat.PY3:
str(idx)
else:
compat.text_type(idx)
def test_bytestring_with_unicode(self):
idx = Index(lrange(1000))
if compat.PY3:
bytes(idx)
else:
str(idx)
def test_slice_keep_name(self):
idx = Int64Index([1, 2], name='asdf')
self.assertEqual(idx.name, idx[1:].name)
class TestDatetimeIndex(Base, tm.TestCase):
_holder = DatetimeIndex
_multiprocess_can_split_ = True
def create_index(self):
return date_range('20130101',periods=5)
def test_pickle_compat_construction(self):
pass
def test_numeric_compat(self):
super(TestDatetimeIndex, self).test_numeric_compat()
if not compat.PY3_2:
for f in [lambda : np.timedelta64(1, 'D').astype('m8[ns]') * pd.date_range('2000-01-01', periods=3),
lambda : pd.date_range('2000-01-01', periods=3) * np.timedelta64(1, 'D').astype('m8[ns]') ]:
self.assertRaises(TypeError, f)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index=date_range('20130101',periods=3,tz='US/Eastern',name='foo')
unpickled = self.round_trip_pickle(index)
self.assertTrue(index.equals(unpickled))
def test_reindex_preserves_tz_if_target_is_empty_list_or_array(self):
# GH7774
index = date_range('20130101', periods=3, tz='US/Eastern')
self.assertEqual(str(index.reindex([])[0].tz), 'US/Eastern')
self.assertEqual(str(index.reindex(np.array([]))[0].tz), 'US/Eastern')
class TestPeriodIndex(Base, tm.TestCase):
_holder = PeriodIndex
_multiprocess_can_split_ = True
def create_index(self):
return period_range('20130101',periods=5,freq='D')
def test_pickle_compat_construction(self):
pass
class TestTimedeltaIndex(Base, tm.TestCase):
_holder = TimedeltaIndex
_multiprocess_can_split_ = True
def create_index(self):
return pd.to_timedelta(range(5),unit='d') + pd.offsets.Hour(1)
def test_numeric_compat(self):
idx = self._holder(np.arange(5,dtype='int64'))
didx = self._holder(np.arange(5,dtype='int64')**2)
result = idx * 1
tm.assert_index_equal(result, idx)
result = 1 * idx
tm.assert_index_equal(result, idx)
result = idx / 1
tm.assert_index_equal(result, idx)
result = idx // 1
tm.assert_index_equal(result, idx)
result = idx * np.array(5,dtype='int64')
tm.assert_index_equal(result, self._holder(np.arange(5,dtype='int64')*5))
result = idx * np.arange(5,dtype='int64')
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='int64'))
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='float64')+0.1)
tm.assert_index_equal(result,
Float64Index(np.arange(5,dtype='float64')*(np.arange(5,dtype='float64')+0.1)))
# invalid
self.assertRaises(TypeError, lambda : idx * idx)
self.assertRaises(ValueError, lambda : idx * self._holder(np.arange(3)))
self.assertRaises(ValueError, lambda : idx * np.array([1,2]))
def test_pickle_compat_construction(self):
pass
class TestMultiIndex(Base, tm.TestCase):
_holder = MultiIndex
_multiprocess_can_split_ = True
_compat_props = ['shape', 'ndim', 'size', 'itemsize']
def setUp(self):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index_names = ['first', 'second']
self.index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=self.index_names, verify_integrity=False)
def create_index(self):
return self.index
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assertRaisesRegexp(ValueError,'The truth value of a',f)
def test_labels_dtypes(self):
# GH 8456
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
self.assertTrue(i.labels[0].dtype == 'int8')
self.assertTrue(i.labels[1].dtype == 'int8')
i = MultiIndex.from_product([['a'],range(40)])
self.assertTrue(i.labels[1].dtype == 'int8')
i = MultiIndex.from_product([['a'],range(400)])
self.assertTrue(i.labels[1].dtype == 'int16')
i = MultiIndex.from_product([['a'],range(40000)])
self.assertTrue(i.labels[1].dtype == 'int32')
i = pd.MultiIndex.from_product([['a'],range(1000)])
self.assertTrue((i.labels[0]>=0).all())
self.assertTrue((i.labels[1]>=0).all())
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.index).__name__):
hash(self.index)
def test_set_names_and_rename(self):
# so long as these are synonyms, we don't need to test set_names
self.assertEqual(self.index.rename, self.index.set_names)
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
self.assertEqual(self.index.names, self.index_names)
self.assertEqual(ind.names, new_names)
with assertRaisesRegexp(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
self.assertIsNone(res)
self.assertEqual(ind.names, new_names2)
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
self.assertEqual(self.index.names, self.index_names)
self.assertEqual(ind.names, [new_names[0], self.index_names[1]])
res = ind.set_names(new_names2[0], level=0, inplace=True)
self.assertIsNone(res)
self.assertEqual(ind.names, [new_names2[0], self.index_names[1]])
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
self.assertEqual(self.index.names, self.index_names)
self.assertEqual(ind.names, new_names)
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
self.assertIsNone(res)
self.assertEqual(ind.names, new_names2)
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels, labels = self.index.levels, self.index.labels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
self.assertEqual(len(actual), len(expected))
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
assert_almost_equal(act, exp)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1], inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
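# Illustrative aside, not part of the original test suite: set_levels returns a
# new MultiIndex by default, and level=... restricts the replacement to a single
# level (a sketch assuming the public set_levels API exercised above).
import pandas as pd

mi = pd.MultiIndex.from_tuples([('foo', 'one'), ('foo', 'two'),
                                ('bar', 'one'), ('bar', 'two')])
mi_all = mi.set_levels([['FOO', 'BAR'], ['ONE', 'TWO']])  # replace both levels
mi_one = mi.set_levels(['A', 'B'], level=0)               # replace only level 0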
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels, labels = self.index.levels, self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
self.assertEqual(len(actual), len(expected))
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
assert_almost_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1], inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assertRaisesRegexp(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assertRaisesRegexp(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assertRaisesRegexp(ValueError, 'Length of names'):
self.index.set_names([names[0]])
# scalar data shouldn't raise an error; instead it should demand list-like input
with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
# scalar data shouldn't raise an error; instead it should demand list-like input
with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0])
# scalar data shouldn't raise an error; instead it should demand list-like input
with tm.assertRaisesRegexp(TypeError, 'list-like'):
self.index.set_names(names[0])
# should have equal lengths
with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assertRaisesRegexp(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assertRaisesRegexp(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assertRaisesRegexp(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assertRaisesRegexp(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_metadata_immutable(self):
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with assertRaisesRegexp(TypeError, mutable_regex):
levels[0] = levels[0]
with assertRaisesRegexp(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with assertRaisesRegexp(TypeError, mutable_regex):
labels[0] = labels[0]
with assertRaisesRegexp(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with assertRaisesRegexp(TypeError, mutable_regex):
names[0] = names[0]
def test_inplace_mutation_resets_values(self):
levels = [['a', 'b', 'c'], [4]]
levels2 = [[1, 2, 3], ['a']]
labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, labels=labels)
mi2 = MultiIndex(levels=levels2, labels=labels)
vals = mi1.values.copy()
vals2 = mi2.values.copy()
self.assertIsNotNone(mi1._tuples)
# make sure level setting works
new_vals = mi1.set_levels(levels2).values
assert_almost_equal(vals2, new_vals)
# non-inplace doesn't kill _tuples [implementation detail]
assert_almost_equal(mi1._tuples, vals)
# and values is still same too
assert_almost_equal(mi1.values, vals)
# inplace should kill _tuples
mi1.set_levels(levels2, inplace=True)
assert_almost_equal(mi1.values, vals2)
# make sure label setting works too
labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
exp_values = np.array([(long(1), 'a')] * 6, dtype=object)
new_values = mi2.set_labels(labels2).values
# non-inplace set_labels shouldn't change _tuples
assert_almost_equal(mi2._tuples, vals2)
# should have correct values
assert_almost_equal(exp_values, new_values)
# and again setting inplace should kill _tuples, etc
mi2.set_labels(labels2, inplace=True)
assert_almost_equal(mi2.values, new_values)
def test_copy_in_constructor(self):
levels = np.array(["a", "b", "c"])
labels = np.array([1, 1, 2, 0, 0, 1, 1])
val = labels[0]
mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],
copy=True)
self.assertEqual(mi.labels[0][0], val)
labels[0] = 15
self.assertEqual(mi.labels[0][0], val)
val = levels[0]
levels[0] = "PANDA"
self.assertEqual(mi.levels[0][0], val)
def test_set_value_keeps_names(self):
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays(
[lev1, lev2],
names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sortlevel()
self.assertIsNone(df.is_copy)
self.assertEqual(df.index.names, ('Name', 'Number'))
df = df.set_value(('grethe', '4'), 'one', 99.34)
self.assertIsNone(df.is_copy)
self.assertEqual(df.index.names, ('Name', 'Number'))
def test_names(self):
# names are assigned in __init__
names = self.index_names
level_names = [level.name for level in self.index.levels]
self.assertEqual(names, level_names)
# setting bad names on existing
index = self.index
assertRaisesRegexp(ValueError, "^Length of names", setattr, index,
"names", list(index.names) + ["third"])
assertRaisesRegexp(ValueError, "^Length of names", setattr, index,
"names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = self.index.levels
major_labels, minor_labels = self.index.labels
assertRaisesRegexp(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first'])
assertRaisesRegexp(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
self.assertEqual(ind_names, level_names)
def test_reference_duplicate_name(self):
idx = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')], names=['x', 'x'])
self.assertTrue(idx._reference_duplicate_name('x'))
idx = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')], names=['x', 'y'])
self.assertFalse(idx._reference_duplicate_name('x'))
def test_astype(self):
expected = self.index.copy()
actual = self.index.astype('O')
assert_copy(actual.levels, expected.levels)
assert_copy(actual.labels, expected.labels)
self.check_level_names(actual, expected.names)
with assertRaisesRegexp(TypeError, "^Setting.*dtype.*object"):
self.index.astype(np.dtype(int))
def test_constructor_single_level(self):
single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]],
names=['first'])
tm.assert_isinstance(single_level, Index)
self.assertNotIsInstance(single_level, MultiIndex)
self.assertEqual(single_level.name, 'first')
single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]])
self.assertIsNone(single_level.name)
def test_constructor_no_levels(self):
assertRaisesRegexp(ValueError, "non-zero number of levels/labels",
MultiIndex, levels=[], labels=[])
both_re = re.compile('Must pass both levels and labels')
with tm.assertRaisesRegexp(TypeError, both_re):
MultiIndex(levels=[])
with tm.assertRaisesRegexp(TypeError, both_re):
MultiIndex(labels=[])
def test_constructor_mismatched_label_levels(self):
labels = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
assertRaisesRegexp(ValueError, "Length of levels and labels must be"
" the same", MultiIndex, levels=levels,
labels=labels)
length_error = re.compile('>= length of level')
label_error = re.compile(r'Unequal label lengths: \[4, 2\]')
# important to check that it's looking at the right thing.
with tm.assertRaisesRegexp(ValueError, length_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 1, 2, 3], [0, 3, 4, 1]])
with tm.assertRaisesRegexp(ValueError, label_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]])
# external API
with tm.assertRaisesRegexp(ValueError, length_error):
self.index.copy().set_levels([['a'], ['b']])
with tm.assertRaisesRegexp(ValueError, label_error):
self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]])
# deprecated properties
with warnings.catch_warnings():
warnings.simplefilter('ignore')
with tm.assertRaisesRegexp(ValueError, length_error):
self.index.copy().levels = [['a'], ['b']]
with tm.assertRaisesRegexp(ValueError, label_error):
self.index.copy().labels = [[0, 0, 0, 0], [0, 0]]
def assert_multiindex_copied(self, copy, original):
# levels should be (at least) shallow copied
assert_copy(copy.levels, original.levels)
assert_almost_equal(copy.labels, original.labels)
# for labels it doesn't matter which way they were copied
assert_almost_equal(copy.labels, original.labels)
self.assertIsNot(copy.labels, original.labels)
# for names it doesn't matter which way they were copied
self.assertEqual(copy.names, original.names)
self.assertIsNot(copy.names, original.names)
# sort order should be copied
self.assertEqual(copy.sortorder, original.sortorder)
def test_copy(self):
i_copy = self.index.copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_shallow_copy(self):
i_copy = self.index._shallow_copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_view(self):
i_view = self.index.view()
self.assert_multiindex_copied(i_view, self.index)
def check_level_names(self, index, names):
self.assertEqual([level.name for level in index.levels], list(names))
def test_changing_names(self):
# names should be applied to levels
level_names = [level.name for level in self.index.levels]
self.check_level_names(self.index, self.index.names)
view = self.index.view()
copy = self.index.copy()
shallow_copy = self.index._shallow_copy()
# changing names should change level names on object
new_names = [name + "a" for name in self.index.names]
self.index.names = new_names
self.check_level_names(self.index, new_names)
# but not on copies
self.check_level_names(view, level_names)
self.check_level_names(copy, level_names)
self.check_level_names(shallow_copy, level_names)
# and copies shouldn't change original
shallow_copy.names = [name + "c" for name in shallow_copy.names]
self.check_level_names(self.index, new_names)
def test_duplicate_names(self):
self.index.names = ['foo', 'foo']
assertRaisesRegexp(KeyError, 'Level foo not found',
self.index._get_level_number, 'foo')
def test_get_level_number_integer(self):
self.index.names = [1, 0]
self.assertEqual(self.index._get_level_number(1), 0)
self.assertEqual(self.index._get_level_number(0), 1)
self.assertRaises(IndexError, self.index._get_level_number, 2)
assertRaisesRegexp(KeyError, 'Level fourth not found',
self.index._get_level_number, 'fourth')
def test_from_arrays(self):
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
result = MultiIndex.from_arrays(arrays)
self.assertEqual(list(result), list(self.index))
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')], ['a', 'b']])
self.assertTrue(result.levels[0].equals(Index([Timestamp('20130101')])))
self.assertTrue(result.levels[1].equals(Index(['a','b'])))
def test_from_product(self):
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
result = MultiIndex.from_product([first, second], names=names)
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'),
('bar', 'a'), ('bar', 'b'), ('bar', 'c'),
('buz', 'a'), ('buz', 'b'), ('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
assert_array_equal(result, expected)
self.assertEqual(result.names, names)
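# Illustrative aside, not part of the original test suite: from_product builds the
# cartesian product of its inputs, equivalent to the explicit from_tuples
# construction verified in the test above.
import pandas as pd

mi = pd.MultiIndex.from_product([['foo', 'bar'], ['a', 'b']],
                                names=['first', 'second'])
print(list(mi))  # [('foo', 'a'), ('foo', 'b'), ('bar', 'a'), ('bar', 'b')]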
def test_from_product_datetimeindex(self):
dt_index = date_range('2000-01-01', periods=2)
mi = pd.MultiIndex.from_product([[1, 2], dt_index])
etalon = pd.lib.list_to_object_array([(1, pd.Timestamp('2000-01-01')),
(1, pd.Timestamp('2000-01-02')),
(2, pd.Timestamp('2000-01-01')),
(2, pd.Timestamp('2000-01-02'))])
assert_array_equal(mi.values, etalon)
def test_values_boxed(self):
tuples = [(1, pd.Timestamp('2000-01-01')),
(2, pd.NaT),
(3, pd.Timestamp('2000-01-03')),
(1, pd.Timestamp('2000-01-04')),
(2, pd.Timestamp('2000-01-02')),
(3, pd.Timestamp('2000-01-03'))]
mi = pd.MultiIndex.from_tuples(tuples)
assert_array_equal(mi.values, pd.lib.list_to_object_array(tuples))
# Check that code branches for boxed values produce identical results
assert_array_equal(mi.values[:4], mi[:4].values)
def test_append(self):
result = self.index[:3].append(self.index[3:])
self.assertTrue(result.equals(self.index))
foos = [self.index[:1], self.index[1:3], self.index[3:]]
result = foos[0].append(foos[1:])
self.assertTrue(result.equals(self.index))
# empty
result = self.index.append([])
self.assertTrue(result.equals(self.index))
def test_get_level_values(self):
result = self.index.get_level_values(0)
expected = ['foo', 'foo', 'bar', 'baz', 'qux', 'qux']
self.assert_numpy_array_equal(result, expected)
self.assertEqual(result.name, 'first')
result = self.index.get_level_values('first')
expected = self.index.get_level_values(0)
self.assert_numpy_array_equal(result, expected)
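# Illustrative aside, not part of the original test suite: get_level_values
# accepts either a level number or a level name, as the test above checks.
import pandas as pd

mi = pd.MultiIndex.from_tuples([('foo', 'one'), ('bar', 'two')],
                               names=['first', 'second'])
print(list(mi.get_level_values(0)))         # ['foo', 'bar']
print(list(mi.get_level_values('second')))  # ['one', 'two']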
def test_get_level_values_na(self):
arrays = [['a', 'b', 'b'], [1, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
values = index.get_level_values(1)
expected = [1, np.nan, 2]
assert_array_equal(values.values.astype(float), expected)
arrays = [['a', 'b', 'b'], [np.nan, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
values = index.get_level_values(1)
expected = [np.nan, np.nan, 2]
assert_array_equal(values.values.astype(float), expected)
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
values = index.get_level_values(0)
expected = [np.nan, np.nan, np.nan]
assert_array_equal(values.values.astype(float), expected)
values = index.get_level_values(1)
expected = np.array(['a', np.nan, 1],dtype=object)
assert_array_equal(values.values, expected)
arrays = [['a', 'b', 'b'], pd.DatetimeIndex([0, 1, pd.NaT])]
index = pd.MultiIndex.from_arrays(arrays)
values = index.get_level_values(1)
expected = pd.DatetimeIndex([0, 1, pd.NaT])
assert_array_equal(values.values, expected.values)
arrays = [[], []]
index = pd.MultiIndex.from_arrays(arrays)
values = index.get_level_values(0)
self.assertEqual(values.shape, (0,))
def test_reorder_levels(self):
# this blows up
assertRaisesRegexp(IndexError, '^Too many levels',
self.index.reorder_levels, [2, 1, 0])
def test_nlevels(self):
self.assertEqual(self.index.nlevels, 2)
def test_iter(self):
result = list(self.index)
expected = [('foo', 'one'), ('foo', 'two'), ('bar', 'one'),
('baz', 'two'), ('qux', 'one'), ('qux', 'two')]
self.assertEqual(result, expected)
def test_legacy_pickle(self):
if compat.PY3:
raise nose.SkipTest("testing for legacy pickles not supported on py3")
path = tm.get_data_path('multiindex_v1.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
self.assertTrue(obj.equals(obj2))
res = obj.get_indexer(obj)
exp = np.arange(len(obj))
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_legacy_v2_unpickle(self):
# 0.7.3 -> 0.8.0 pickle format change
path = tm.get_data_path('mindex_073.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
self.assertTrue(obj.equals(obj2))
res = obj.get_indexer(obj)
exp = np.arange(len(obj))
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index=MultiIndex.from_product([[1,2],['a','b'],date_range('20130101',periods=3,tz='US/Eastern')],names=['one','two','three'])
unpickled = self.round_trip_pickle(index)
self.assertTrue(index.equal_levels(unpickled))
def test_from_tuples_index_values(self):
result = MultiIndex.from_tuples(self.index)
self.assertTrue((result.values == self.index.values).all())
def test_contains(self):
self.assertIn(('foo', 'two'), self.index)
self.assertNotIn(('bar', 'two'), self.index)
self.assertNotIn(None, self.index)
def test_is_all_dates(self):
self.assertFalse(self.index.is_all_dates)
def test_is_numeric(self):
# MultiIndex is never numeric
self.assertFalse(self.index.is_numeric())
def test_getitem(self):
# scalar
self.assertEqual(self.index[2], ('bar', 'one'))
# slice
result = self.index[2:5]
expected = self.index[[2, 3, 4]]
self.assertTrue(result.equals(expected))
# boolean
result = self.index[[True, False, True, False, True, True]]
result2 = self.index[np.array([True, False, True, False, True, True])]
expected = self.index[[0, 2, 4, 5]]
self.assertTrue(result.equals(expected))
self.assertTrue(result2.equals(expected))
def test_getitem_group_select(self):
sorted_idx, _ = self.index.sortlevel(0)
self.assertEqual(sorted_idx.get_loc('baz'), slice(3, 4))
self.assertEqual(sorted_idx.get_loc('foo'), slice(0, 2))
def test_get_loc(self):
self.assertEqual(self.index.get_loc(('foo', 'two')), 1)
self.assertEqual(self.index.get_loc(('baz', 'two')), 3)
self.assertRaises(KeyError, self.index.get_loc, ('bar', 'two'))
self.assertRaises(KeyError, self.index.get_loc, 'quux')
# 3 levels
index = MultiIndex(levels=[Index(lrange(4)),
Index(lrange(4)),