| Column | Type | Length / value range |
|---|---|---|
| body | string | lengths 26 to 98.2k |
| body_hash | int64 | -9,222,864,604,528,158,000 to 9,221,803,474B |
| docstring | string | lengths 1 to 16.8k |
| path | string | lengths 5 to 230 |
| name | string | lengths 1 to 96 |
| repository_name | string | lengths 7 to 89 |
| lang | string | 1 class (python) |
| body_without_docstring | string | lengths 20 to 98.2k |

Each record below is a flattened row with cells in the order:
body | body_hash | docstring | path | name | repository_name | lang | body_without_docstring
def grangercausalitytests(x, maxlag, addconst=True, verbose=True):
"four tests for granger non causality of 2 timeseries\n\n all four tests give similar results\n `params_ftest` and `ssr_ftest` are equivalent based on F test which is\n identical to lmtest:grangertest in R\n\n Parameters\n ----------\n x : array, 2d\n data for test whether the time series in the second column Granger\n causes the time series in the first column\n maxlag : integer\n the Granger causality test results are calculated for all lags up to\n maxlag\n verbose : bool\n print results if true\n\n Returns\n -------\n results : dictionary\n all test results, dictionary keys are the number of lags. For each\n lag the values are a tuple, with the first element a dictionary with\n teststatistic, pvalues, degrees of freedom, the second element are\n the OLS estimation results for the restricted model, the unrestricted\n model and the restriction (contrast) matrix for the parameter f_test.\n\n Notes\n -----\n TODO: convert to class and attach results properly\n\n The Null hypothesis for grangercausalitytests is that the time series in\n the second column, x2, does NOT Granger cause the time series in the first\n column, x1. Grange causality means that past values of x2 have a\n statistically significant effect on the current value of x1, taking past\n values of x1 into account as regressors. We reject the null hypothesis\n that x2 does not Granger cause x1 if the pvalues are below a desired size\n of the test.\n\n The null hypothesis for all four test is that the coefficients\n corresponding to past values of the second time series are zero.\n\n 'params_ftest', 'ssr_ftest' are based on F distribution\n\n 'ssr_chi2test', 'lrtest' are based on chi-square distribution\n\n References\n ----------\n http://en.wikipedia.org/wiki/Granger_causality\n Greene: Econometric Analysis\n\n "
from scipy import stats
x = np.asarray(x)
if (x.shape[0] <= ((3 * maxlag) + int(addconst))):
raise ValueError('Insufficient observations. Maximum allowable lag is {0}'.format((int(((x.shape[0] - int(addconst)) / 3)) - 1)))
resli = {}
for mlg in range(1, (maxlag + 1)):
result = {}
if verbose:
print('\nGranger Causality')
print('number of lags (no zero)', mlg)
mxlg = mlg
dta = lagmat2ds(x, mxlg, trim='both', dropex=1)
if addconst:
dtaown = add_constant(dta[:, 1:(mxlg + 1)], prepend=False)
dtajoint = add_constant(dta[:, 1:], prepend=False)
else:
raise NotImplementedError('Not Implemented')
res2down = OLS(dta[:, 0], dtaown).fit()
res2djoint = OLS(dta[:, 0], dtajoint).fit()
fgc1 = ((((res2down.ssr - res2djoint.ssr) / res2djoint.ssr) / mxlg) * res2djoint.df_resid)
if verbose:
print(('ssr based F test: F=%-8.4f, p=%-8.4f, df_denom=%d, df_num=%d' % (fgc1, stats.f.sf(fgc1, mxlg, res2djoint.df_resid), res2djoint.df_resid, mxlg)))
result['ssr_ftest'] = (fgc1, stats.f.sf(fgc1, mxlg, res2djoint.df_resid), res2djoint.df_resid, mxlg)
fgc2 = ((res2down.nobs * (res2down.ssr - res2djoint.ssr)) / res2djoint.ssr)
if verbose:
print(('ssr based chi2 test: chi2=%-8.4f, p=%-8.4f, df=%d' % (fgc2, stats.chi2.sf(fgc2, mxlg), mxlg)))
result['ssr_chi2test'] = (fgc2, stats.chi2.sf(fgc2, mxlg), mxlg)
lr = ((- 2) * (res2down.llf - res2djoint.llf))
if verbose:
print(('likelihood ratio test: chi2=%-8.4f, p=%-8.4f, df=%d' % (lr, stats.chi2.sf(lr, mxlg), mxlg)))
result['lrtest'] = (lr, stats.chi2.sf(lr, mxlg), mxlg)
rconstr = np.column_stack((np.zeros((mxlg, mxlg)), np.eye(mxlg, mxlg), np.zeros((mxlg, 1))))
ftres = res2djoint.f_test(rconstr)
if verbose:
print(('parameter F test: F=%-8.4f, p=%-8.4f, df_denom=%d, df_num=%d' % (ftres.fvalue, ftres.pvalue, ftres.df_denom, ftres.df_num)))
result['params_ftest'] = (np.squeeze(ftres.fvalue)[()], np.squeeze(ftres.pvalue)[()], ftres.df_denom, ftres.df_num)
resli[mxlg] = (result, [res2down, res2djoint, rconstr])
return resli
| body_hash: 2,497,703,576,321,163,000 | docstring: four tests for granger non causality of 2 timeseries
all four tests give similar results
`params_ftest` and `ssr_ftest` are equivalent based on F test which is
identical to lmtest:grangertest in R
Parameters
----------
x : array, 2d
data for test whether the time series in the second column Granger
causes the time series in the first column
maxlag : integer
the Granger causality test results are calculated for all lags up to
maxlag
verbose : bool
print results if true
Returns
-------
results : dictionary
all test results, dictionary keys are the number of lags. For each
lag the values are a tuple, with the first element a dictionary with
teststatistic, pvalues, degrees of freedom, the second element are
the OLS estimation results for the restricted model, the unrestricted
model and the restriction (contrast) matrix for the parameter f_test.
Notes
-----
TODO: convert to class and attach results properly
The Null hypothesis for grangercausalitytests is that the time series in
the second column, x2, does NOT Granger cause the time series in the first
column, x1. Granger causality means that past values of x2 have a
statistically significant effect on the current value of x1, taking past
values of x1 into account as regressors. We reject the null hypothesis
that x2 does not Granger cause x1 if the pvalues are below a desired size
of the test.
The null hypothesis for all four tests is that the coefficients
corresponding to past values of the second time series are zero.
'params_ftest', 'ssr_ftest' are based on F distribution
'ssr_chi2test', 'lrtest' are based on chi-square distribution
References
----------
http://en.wikipedia.org/wiki/Granger_causality
Greene: Econometric Analysis
| path: statsmodels/tsa/stattools.py | name: grangercausalitytests | repository_name: josef-pkt/statsmodels | lang: python |
body_without_docstring:
def grangercausalitytests(x, maxlag, addconst=True, verbose=True):
"four tests for granger non causality of 2 timeseries\n\n all four tests give similar results\n `params_ftest` and `ssr_ftest` are equivalent based on F test which is\n identical to lmtest:grangertest in R\n\n Parameters\n ----------\n x : array, 2d\n data for test whether the time series in the second column Granger\n causes the time series in the first column\n maxlag : integer\n the Granger causality test results are calculated for all lags up to\n maxlag\n verbose : bool\n print results if true\n\n Returns\n -------\n results : dictionary\n all test results, dictionary keys are the number of lags. For each\n lag the values are a tuple, with the first element a dictionary with\n teststatistic, pvalues, degrees of freedom, the second element are\n the OLS estimation results for the restricted model, the unrestricted\n model and the restriction (contrast) matrix for the parameter f_test.\n\n Notes\n -----\n TODO: convert to class and attach results properly\n\n The Null hypothesis for grangercausalitytests is that the time series in\n the second column, x2, does NOT Granger cause the time series in the first\n column, x1. Grange causality means that past values of x2 have a\n statistically significant effect on the current value of x1, taking past\n values of x1 into account as regressors. We reject the null hypothesis\n that x2 does not Granger cause x1 if the pvalues are below a desired size\n of the test.\n\n The null hypothesis for all four test is that the coefficients\n corresponding to past values of the second time series are zero.\n\n 'params_ftest', 'ssr_ftest' are based on F distribution\n\n 'ssr_chi2test', 'lrtest' are based on chi-square distribution\n\n References\n ----------\n http://en.wikipedia.org/wiki/Granger_causality\n Greene: Econometric Analysis\n\n "
from scipy import stats
x = np.asarray(x)
if (x.shape[0] <= ((3 * maxlag) + int(addconst))):
raise ValueError('Insufficient observations. Maximum allowable lag is {0}'.format((int(((x.shape[0] - int(addconst)) / 3)) - 1)))
resli = {}
for mlg in range(1, (maxlag + 1)):
result = {}
if verbose:
print('\nGranger Causality')
print('number of lags (no zero)', mlg)
mxlg = mlg
dta = lagmat2ds(x, mxlg, trim='both', dropex=1)
if addconst:
dtaown = add_constant(dta[:, 1:(mxlg + 1)], prepend=False)
dtajoint = add_constant(dta[:, 1:], prepend=False)
else:
raise NotImplementedError('Not Implemented')
res2down = OLS(dta[:, 0], dtaown).fit()
res2djoint = OLS(dta[:, 0], dtajoint).fit()
fgc1 = ((((res2down.ssr - res2djoint.ssr) / res2djoint.ssr) / mxlg) * res2djoint.df_resid)
if verbose:
print(('ssr based F test: F=%-8.4f, p=%-8.4f, df_denom=%d, df_num=%d' % (fgc1, stats.f.sf(fgc1, mxlg, res2djoint.df_resid), res2djoint.df_resid, mxlg)))
result['ssr_ftest'] = (fgc1, stats.f.sf(fgc1, mxlg, res2djoint.df_resid), res2djoint.df_resid, mxlg)
fgc2 = ((res2down.nobs * (res2down.ssr - res2djoint.ssr)) / res2djoint.ssr)
if verbose:
print(('ssr based chi2 test: chi2=%-8.4f, p=%-8.4f, df=%d' % (fgc2, stats.chi2.sf(fgc2, mxlg), mxlg)))
result['ssr_chi2test'] = (fgc2, stats.chi2.sf(fgc2, mxlg), mxlg)
lr = ((- 2) * (res2down.llf - res2djoint.llf))
if verbose:
print(('likelihood ratio test: chi2=%-8.4f, p=%-8.4f, df=%d' % (lr, stats.chi2.sf(lr, mxlg), mxlg)))
result['lrtest'] = (lr, stats.chi2.sf(lr, mxlg), mxlg)
rconstr = np.column_stack((np.zeros((mxlg, mxlg)), np.eye(mxlg, mxlg), np.zeros((mxlg, 1))))
ftres = res2djoint.f_test(rconstr)
if verbose:
print(('parameter F test: F=%-8.4f, p=%-8.4f, df_denom=%d, df_num=%d' % (ftres.fvalue, ftres.pvalue, ftres.df_denom, ftres.df_num)))
result['params_ftest'] = (np.squeeze(ftres.fvalue)[()], np.squeeze(ftres.pvalue)[()], ftres.df_denom, ftres.df_num)
resli[mxlg] = (result, [res2down, res2djoint, rconstr])
return resli |
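The record above documents `statsmodels.tsa.stattools.grangercausalitytests`. A minimal usage sketch follows; the synthetic data, seed, and variable names are invented for illustration and are not part of the record.

```python
import numpy as np
from statsmodels.tsa.stattools import grangercausalitytests

rng = np.random.RandomState(12345)
x2 = rng.standard_normal(200)                               # candidate "cause"
x1 = np.r_[0.0, x2[:-1]] + 0.1 * rng.standard_normal(200)   # x1 driven by lagged x2
data = np.column_stack([x1, x2])                            # column 0: effect, column 1: cause

results = grangercausalitytests(data, maxlag=3, verbose=False)
for lag, (tests, _models) in results.items():
    f_stat, p_value, _df_denom, _df_num = tests["ssr_ftest"]
    print(f"lag={lag}  F={f_stat:.2f}  p={p_value:.4f}")    # small p rejects "x2 does not Granger cause x1"
```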
def coint(y0, y1, trend='c', method='aeg', maxlag=None, autolag='aic', return_results=None):
'Test for no-cointegration of a univariate equation\n\n The null hypothesis is no cointegration. Variables in y0 and y1 are\n assumed to be integrated of order 1, I(1).\n\n This uses the augmented Engle-Granger two-step cointegration test.\n Constant or trend is included in 1st stage regression, i.e. in\n cointegrating equation.\n\n **Warning:** The autolag default has changed compared to statsmodels 0.8.\n In 0.8 autolag was always None, no the keyword is used and defaults to\n \'aic\'. Use `autolag=None` to avoid the lag search.\n\n Parameters\n ----------\n y1 : array_like, 1d\n first element in cointegrating vector\n y2 : array_like\n remaining elements in cointegrating vector\n trend : str {\'c\', \'ct\'}\n trend term included in regression for cointegrating equation\n * \'c\' : constant\n * \'ct\' : constant and linear trend\n * also available quadratic trend \'ctt\', and no constant \'nc\'\n\n method : string\n currently only \'aeg\' for augmented Engle-Granger test is available.\n default might change.\n maxlag : None or int\n keyword for `adfuller`, largest or given number of lags\n autolag : string\n keyword for `adfuller`, lag selection criterion.\n * if None, then maxlag lags are used without lag search\n * if \'AIC\' (default) or \'BIC\', then the number of lags is chosen\n to minimize the corresponding information criterion\n * \'t-stat\' based choice of maxlag. Starts with maxlag and drops a\n lag until the t-statistic on the last lag length is significant\n using a 5%-sized test\n\n return_results : bool\n for future compatibility, currently only tuple available.\n If True, then a results instance is returned. Otherwise, a tuple\n with the test outcome is returned.\n Set `return_results=False` to avoid future changes in return.\n\n\n Returns\n -------\n coint_t : float\n t-statistic of unit-root test on residuals\n pvalue : float\n MacKinnon\'s approximate, asymptotic p-value based on MacKinnon (1994)\n crit_value : dict\n Critical values for the test statistic at the 1 %, 5 %, and 10 %\n levels based on regression curve. This depends on the number of\n observations.\n\n Notes\n -----\n The Null hypothesis is that there is no cointegration, the alternative\n hypothesis is that there is cointegrating relationship. If the pvalue is\n small, below a critical size, then we can reject the hypothesis that there\n is no cointegrating relationship.\n\n P-values and critical values are obtained through regression surface\n approximation from MacKinnon 1994 and 2010.\n\n If the two series are almost perfectly collinear, then computing the\n test is numerically unstable. However, the two series will be cointegrated\n under the maintained assumption that they are integrated. In this case\n the t-statistic will be set to -inf and the pvalue to zero.\n\n TODO: We could handle gaps in data by dropping rows with nans in the\n auxiliary regressions. Not implemented yet, currently assumes no nans\n and no gaps in time series.\n\n References\n ----------\n MacKinnon, J.G. 1994 "Approximate Asymptotic Distribution Functions for\n Unit-Root and Cointegration Tests." Journal of Business & Economics\n Statistics, 12.2, 167-76.\n MacKinnon, J.G. 2010. "Critical Values for Cointegration Tests."\n Queen\'s University, Dept of Economics Working Papers 1227.\n http://ideas.repec.org/p/qed/wpaper/1227.html\n '
trend = trend.lower()
if (trend not in ['c', 'nc', 'ct', 'ctt']):
raise ValueError(('trend option %s not understood' % trend))
y0 = np.asarray(y0)
y1 = np.asarray(y1)
if (y1.ndim < 2):
y1 = y1[:, None]
(nobs, k_vars) = y1.shape
k_vars += 1
if (trend == 'nc'):
xx = y1
else:
xx = add_trend(y1, trend=trend, prepend=False)
res_co = OLS(y0, xx).fit()
if (res_co.rsquared < (1 - (100 * SQRTEPS))):
res_adf = adfuller(res_co.resid, maxlag=maxlag, autolag=autolag, regression='nc')
else:
import warnings
warnings.warn('y0 and y1 are (almost) perfectly colinear.Cointegration test is not reliable in this case.')
res_adf = ((- np.inf),)
if (trend == 'nc'):
crit = ([np.nan] * 3)
else:
crit = mackinnoncrit(N=k_vars, regression=trend, nobs=(nobs - 1))
pval_asy = mackinnonp(res_adf[0], regression=trend, N=k_vars)
return (res_adf[0], pval_asy, crit)
| body_hash: 3,271,704,490,787,290,600 | docstring: Test for no-cointegration of a univariate equation
The null hypothesis is no cointegration. Variables in y0 and y1 are
assumed to be integrated of order 1, I(1).
This uses the augmented Engle-Granger two-step cointegration test.
Constant or trend is included in 1st stage regression, i.e. in
cointegrating equation.
**Warning:** The autolag default has changed compared to statsmodels 0.8.
In 0.8 autolag was always None; now the keyword is used and defaults to
'aic'. Use `autolag=None` to avoid the lag search.
Parameters
----------
y1 : array_like, 1d
first element in cointegrating vector
y2 : array_like
remaining elements in cointegrating vector
trend : str {'c', 'ct'}
trend term included in regression for cointegrating equation
* 'c' : constant
* 'ct' : constant and linear trend
* also available quadratic trend 'ctt', and no constant 'nc'
method : string
currently only 'aeg' for augmented Engle-Granger test is available.
default might change.
maxlag : None or int
keyword for `adfuller`, largest or given number of lags
autolag : string
keyword for `adfuller`, lag selection criterion.
* if None, then maxlag lags are used without lag search
* if 'AIC' (default) or 'BIC', then the number of lags is chosen
to minimize the corresponding information criterion
* 't-stat' based choice of maxlag. Starts with maxlag and drops a
lag until the t-statistic on the last lag length is significant
using a 5%-sized test
return_results : bool
for future compatibility, currently only tuple available.
If True, then a results instance is returned. Otherwise, a tuple
with the test outcome is returned.
Set `return_results=False` to avoid future changes in return.
Returns
-------
coint_t : float
t-statistic of unit-root test on residuals
pvalue : float
MacKinnon's approximate, asymptotic p-value based on MacKinnon (1994)
crit_value : dict
Critical values for the test statistic at the 1 %, 5 %, and 10 %
levels based on regression curve. This depends on the number of
observations.
Notes
-----
The Null hypothesis is that there is no cointegration, the alternative
hypothesis is that there is a cointegrating relationship. If the pvalue is
small, below a critical size, then we can reject the hypothesis that there
is no cointegrating relationship.
P-values and critical values are obtained through regression surface
approximation from MacKinnon 1994 and 2010.
If the two series are almost perfectly collinear, then computing the
test is numerically unstable. However, the two series will be cointegrated
under the maintained assumption that they are integrated. In this case
the t-statistic will be set to -inf and the pvalue to zero.
TODO: We could handle gaps in data by dropping rows with nans in the
auxiliary regressions. Not implemented yet, currently assumes no nans
and no gaps in time series.
References
----------
MacKinnon, J.G. 1994 "Approximate Asymptotic Distribution Functions for
Unit-Root and Cointegration Tests." Journal of Business & Economics
Statistics, 12.2, 167-76.
MacKinnon, J.G. 2010. "Critical Values for Cointegration Tests."
Queen's University, Dept of Economics Working Papers 1227.
http://ideas.repec.org/p/qed/wpaper/1227.html
| path: statsmodels/tsa/stattools.py | name: coint | repository_name: josef-pkt/statsmodels | lang: python |
body_without_docstring:
def coint(y0, y1, trend='c', method='aeg', maxlag=None, autolag='aic', return_results=None):
'Test for no-cointegration of a univariate equation\n\n The null hypothesis is no cointegration. Variables in y0 and y1 are\n assumed to be integrated of order 1, I(1).\n\n This uses the augmented Engle-Granger two-step cointegration test.\n Constant or trend is included in 1st stage regression, i.e. in\n cointegrating equation.\n\n **Warning:** The autolag default has changed compared to statsmodels 0.8.\n In 0.8 autolag was always None, no the keyword is used and defaults to\n \'aic\'. Use `autolag=None` to avoid the lag search.\n\n Parameters\n ----------\n y1 : array_like, 1d\n first element in cointegrating vector\n y2 : array_like\n remaining elements in cointegrating vector\n trend : str {\'c\', \'ct\'}\n trend term included in regression for cointegrating equation\n * \'c\' : constant\n * \'ct\' : constant and linear trend\n * also available quadratic trend \'ctt\', and no constant \'nc\'\n\n method : string\n currently only \'aeg\' for augmented Engle-Granger test is available.\n default might change.\n maxlag : None or int\n keyword for `adfuller`, largest or given number of lags\n autolag : string\n keyword for `adfuller`, lag selection criterion.\n * if None, then maxlag lags are used without lag search\n * if \'AIC\' (default) or \'BIC\', then the number of lags is chosen\n to minimize the corresponding information criterion\n * \'t-stat\' based choice of maxlag. Starts with maxlag and drops a\n lag until the t-statistic on the last lag length is significant\n using a 5%-sized test\n\n return_results : bool\n for future compatibility, currently only tuple available.\n If True, then a results instance is returned. Otherwise, a tuple\n with the test outcome is returned.\n Set `return_results=False` to avoid future changes in return.\n\n\n Returns\n -------\n coint_t : float\n t-statistic of unit-root test on residuals\n pvalue : float\n MacKinnon\'s approximate, asymptotic p-value based on MacKinnon (1994)\n crit_value : dict\n Critical values for the test statistic at the 1 %, 5 %, and 10 %\n levels based on regression curve. This depends on the number of\n observations.\n\n Notes\n -----\n The Null hypothesis is that there is no cointegration, the alternative\n hypothesis is that there is cointegrating relationship. If the pvalue is\n small, below a critical size, then we can reject the hypothesis that there\n is no cointegrating relationship.\n\n P-values and critical values are obtained through regression surface\n approximation from MacKinnon 1994 and 2010.\n\n If the two series are almost perfectly collinear, then computing the\n test is numerically unstable. However, the two series will be cointegrated\n under the maintained assumption that they are integrated. In this case\n the t-statistic will be set to -inf and the pvalue to zero.\n\n TODO: We could handle gaps in data by dropping rows with nans in the\n auxiliary regressions. Not implemented yet, currently assumes no nans\n and no gaps in time series.\n\n References\n ----------\n MacKinnon, J.G. 1994 "Approximate Asymptotic Distribution Functions for\n Unit-Root and Cointegration Tests." Journal of Business & Economics\n Statistics, 12.2, 167-76.\n MacKinnon, J.G. 2010. "Critical Values for Cointegration Tests."\n Queen\'s University, Dept of Economics Working Papers 1227.\n http://ideas.repec.org/p/qed/wpaper/1227.html\n '
trend = trend.lower()
if (trend not in ['c', 'nc', 'ct', 'ctt']):
raise ValueError(('trend option %s not understood' % trend))
y0 = np.asarray(y0)
y1 = np.asarray(y1)
if (y1.ndim < 2):
y1 = y1[:, None]
(nobs, k_vars) = y1.shape
k_vars += 1
if (trend == 'nc'):
xx = y1
else:
xx = add_trend(y1, trend=trend, prepend=False)
res_co = OLS(y0, xx).fit()
if (res_co.rsquared < (1 - (100 * SQRTEPS))):
res_adf = adfuller(res_co.resid, maxlag=maxlag, autolag=autolag, regression='nc')
else:
import warnings
warnings.warn('y0 and y1 are (almost) perfectly colinear.Cointegration test is not reliable in this case.')
res_adf = ((- np.inf),)
if (trend == 'nc'):
crit = ([np.nan] * 3)
else:
crit = mackinnoncrit(N=k_vars, regression=trend, nobs=(nobs - 1))
pval_asy = mackinnonp(res_adf[0], regression=trend, N=k_vars)
return (res_adf[0], pval_asy, crit) |
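A hedged sketch of calling `coint` as documented above; the two random-walk series are made up purely to show the call signature and the returned tuple.

```python
import numpy as np
from statsmodels.tsa.stattools import coint

rng = np.random.RandomState(0)
common = rng.standard_normal(500).cumsum()    # shared I(1) component
y0 = common + rng.standard_normal(500)
y1 = 0.5 * common + rng.standard_normal(500)

t_stat, p_value, crit_values = coint(y0, y1, trend="c", autolag="aic")
print(t_stat, p_value, crit_values)           # small p-value rejects "no cointegration"
```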
def arma_order_select_ic(y, max_ar=4, max_ma=2, ic='bic', trend='c', model_kw={}, fit_kw={}):
"\n Returns information criteria for many ARMA models\n\n Parameters\n ----------\n y : array-like\n Time-series data\n max_ar : int\n Maximum number of AR lags to use. Default 4.\n max_ma : int\n Maximum number of MA lags to use. Default 2.\n ic : str, list\n Information criteria to report. Either a single string or a list\n of different criteria is possible.\n trend : str\n The trend to use when fitting the ARMA models.\n model_kw : dict\n Keyword arguments to be passed to the ``ARMA`` model\n fit_kw : dict\n Keyword arguments to be passed to ``ARMA.fit``.\n\n Returns\n -------\n obj : Results object\n Each ic is an attribute with a DataFrame for the results. The AR order\n used is the row index. The ma order used is the column index. The\n minimum orders are available as ``ic_min_order``.\n\n Examples\n --------\n\n >>> from statsmodels.tsa.arima_process import arma_generate_sample\n >>> import statsmodels.api as sm\n >>> import numpy as np\n\n >>> arparams = np.array([.75, -.25])\n >>> maparams = np.array([.65, .35])\n >>> arparams = np.r_[1, -arparams]\n >>> maparam = np.r_[1, maparams]\n >>> nobs = 250\n >>> np.random.seed(2014)\n >>> y = arma_generate_sample(arparams, maparams, nobs)\n >>> res = sm.tsa.arma_order_select_ic(y, ic=['aic', 'bic'], trend='nc')\n >>> res.aic_min_order\n >>> res.bic_min_order\n\n Notes\n -----\n This method can be used to tentatively identify the order of an ARMA\n process, provided that the time series is stationary and invertible. This\n function computes the full exact MLE estimate of each model and can be,\n therefore a little slow. An implementation using approximate estimates\n will be provided in the future. In the meantime, consider passing\n {method : 'css'} to fit_kw.\n "
from pandas import DataFrame
ar_range = lrange(0, (max_ar + 1))
ma_range = lrange(0, (max_ma + 1))
if isinstance(ic, string_types):
ic = [ic]
elif (not isinstance(ic, (list, tuple))):
raise ValueError('Need a list or a tuple for ic if not a string.')
results = np.zeros((len(ic), (max_ar + 1), (max_ma + 1)))
for ar in ar_range:
for ma in ma_range:
if ((ar == 0) and (ma == 0) and (trend == 'nc')):
results[:, ar, ma] = np.nan
continue
mod = _safe_arma_fit(y, (ar, ma), model_kw, trend, fit_kw)
if (mod is None):
results[:, ar, ma] = np.nan
continue
for (i, criteria) in enumerate(ic):
results[(i, ar, ma)] = getattr(mod, criteria)
dfs = [DataFrame(res, columns=ma_range, index=ar_range) for res in results]
res = dict(zip(ic, dfs))
min_res = {}
for (i, result) in iteritems(res):
mins = np.where((result.min().min() == result))
min_res.update({(i + '_min_order'): (mins[0][0], mins[1][0])})
res.update(min_res)
return Bunch(**res)
| body_hash: -3,637,179,301,659,311,600 | docstring: Returns information criteria for many ARMA models
Parameters
----------
y : array-like
Time-series data
max_ar : int
Maximum number of AR lags to use. Default 4.
max_ma : int
Maximum number of MA lags to use. Default 2.
ic : str, list
Information criteria to report. Either a single string or a list
of different criteria is possible.
trend : str
The trend to use when fitting the ARMA models.
model_kw : dict
Keyword arguments to be passed to the ``ARMA`` model
fit_kw : dict
Keyword arguments to be passed to ``ARMA.fit``.
Returns
-------
obj : Results object
Each ic is an attribute with a DataFrame for the results. The AR order
used is the row index. The ma order used is the column index. The
minimum orders are available as ``ic_min_order``.
Examples
--------
>>> from statsmodels.tsa.arima_process import arma_generate_sample
>>> import statsmodels.api as sm
>>> import numpy as np
>>> arparams = np.array([.75, -.25])
>>> maparams = np.array([.65, .35])
>>> arparams = np.r_[1, -arparams]
>>> maparam = np.r_[1, maparams]
>>> nobs = 250
>>> np.random.seed(2014)
>>> y = arma_generate_sample(arparams, maparams, nobs)
>>> res = sm.tsa.arma_order_select_ic(y, ic=['aic', 'bic'], trend='nc')
>>> res.aic_min_order
>>> res.bic_min_order
Notes
-----
This method can be used to tentatively identify the order of an ARMA
process, provided that the time series is stationary and invertible. This
function computes the full exact MLE estimate of each model and can be,
therefore a little slow. An implementation using approximate estimates
will be provided in the future. In the meantime, consider passing
{method : 'css'} to fit_kw.
| path: statsmodels/tsa/stattools.py | name: arma_order_select_ic | repository_name: josef-pkt/statsmodels | lang: python |
body_without_docstring:
def arma_order_select_ic(y, max_ar=4, max_ma=2, ic='bic', trend='c', model_kw={}, fit_kw={}):
"\n Returns information criteria for many ARMA models\n\n Parameters\n ----------\n y : array-like\n Time-series data\n max_ar : int\n Maximum number of AR lags to use. Default 4.\n max_ma : int\n Maximum number of MA lags to use. Default 2.\n ic : str, list\n Information criteria to report. Either a single string or a list\n of different criteria is possible.\n trend : str\n The trend to use when fitting the ARMA models.\n model_kw : dict\n Keyword arguments to be passed to the ``ARMA`` model\n fit_kw : dict\n Keyword arguments to be passed to ``ARMA.fit``.\n\n Returns\n -------\n obj : Results object\n Each ic is an attribute with a DataFrame for the results. The AR order\n used is the row index. The ma order used is the column index. The\n minimum orders are available as ``ic_min_order``.\n\n Examples\n --------\n\n >>> from statsmodels.tsa.arima_process import arma_generate_sample\n >>> import statsmodels.api as sm\n >>> import numpy as np\n\n >>> arparams = np.array([.75, -.25])\n >>> maparams = np.array([.65, .35])\n >>> arparams = np.r_[1, -arparams]\n >>> maparam = np.r_[1, maparams]\n >>> nobs = 250\n >>> np.random.seed(2014)\n >>> y = arma_generate_sample(arparams, maparams, nobs)\n >>> res = sm.tsa.arma_order_select_ic(y, ic=['aic', 'bic'], trend='nc')\n >>> res.aic_min_order\n >>> res.bic_min_order\n\n Notes\n -----\n This method can be used to tentatively identify the order of an ARMA\n process, provided that the time series is stationary and invertible. This\n function computes the full exact MLE estimate of each model and can be,\n therefore a little slow. An implementation using approximate estimates\n will be provided in the future. In the meantime, consider passing\n {method : 'css'} to fit_kw.\n "
from pandas import DataFrame
ar_range = lrange(0, (max_ar + 1))
ma_range = lrange(0, (max_ma + 1))
if isinstance(ic, string_types):
ic = [ic]
elif (not isinstance(ic, (list, tuple))):
raise ValueError('Need a list or a tuple for ic if not a string.')
results = np.zeros((len(ic), (max_ar + 1), (max_ma + 1)))
for ar in ar_range:
for ma in ma_range:
if ((ar == 0) and (ma == 0) and (trend == 'nc')):
results[:, ar, ma] = np.nan
continue
mod = _safe_arma_fit(y, (ar, ma), model_kw, trend, fit_kw)
if (mod is None):
results[:, ar, ma] = np.nan
continue
for (i, criteria) in enumerate(ic):
results[(i, ar, ma)] = getattr(mod, criteria)
dfs = [DataFrame(res, columns=ma_range, index=ar_range) for res in results]
res = dict(zip(ic, dfs))
min_res = {}
for (i, result) in iteritems(res):
mins = np.where((result.min().min() == result))
min_res.update({(i + '_min_order'): (mins[0][0], mins[1][0])})
res.update(min_res)
return Bunch(**res) |
def has_missing(data):
"\n Returns True if 'data' contains missing entries, otherwise False\n "
return np.isnan(np.sum(data))
| body_hash: -7,950,675,208,535,767,000 | docstring: Returns True if 'data' contains missing entries, otherwise False |
| path: statsmodels/tsa/stattools.py | name: has_missing | repository_name: josef-pkt/statsmodels | lang: python |
body_without_docstring:
def has_missing(data):
"\n \n "
return np.isnan(np.sum(data)) |
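The helper above relies on NaN propagating through `np.sum`; a tiny check with made-up arrays:

```python
import numpy as np

print(np.isnan(np.sum(np.array([1.0, 2.0, 3.0]))))     # False: no missing entries
print(np.isnan(np.sum(np.array([1.0, np.nan, 3.0]))))  # True: NaN propagates through the sum
```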
def kpss(x, regression='c', lags=None, store=False):
"\n Kwiatkowski-Phillips-Schmidt-Shin test for stationarity.\n\n Computes the Kwiatkowski-Phillips-Schmidt-Shin (KPSS) test for the null\n hypothesis that x is level or trend stationary.\n\n Parameters\n ----------\n x : array_like, 1d\n Data series\n regression : str{'c', 'ct'}\n Indicates the null hypothesis for the KPSS test\n * 'c' : The data is stationary around a constant (default)\n * 'ct' : The data is stationary around a trend\n lags : int\n Indicates the number of lags to be used. If None (default),\n lags is set to int(12 * (n / 100)**(1 / 4)), as outlined in\n Schwert (1989).\n store : bool\n If True, then a result instance is returned additionally to\n the KPSS statistic (default is False).\n\n Returns\n -------\n kpss_stat : float\n The KPSS test statistic\n p_value : float\n The p-value of the test. The p-value is interpolated from\n Table 1 in Kwiatkowski et al. (1992), and a boundary point\n is returned if the test statistic is outside the table of\n critical values, that is, if the p-value is outside the\n interval (0.01, 0.1).\n lags : int\n The truncation lag parameter\n crit : dict\n The critical values at 10%, 5%, 2.5% and 1%. Based on\n Kwiatkowski et al. (1992).\n resstore : (optional) instance of ResultStore\n An instance of a dummy class with results attached as attributes\n\n Notes\n -----\n To estimate sigma^2 the Newey-West estimator is used. If lags is None,\n the truncation lag parameter is set to int(12 * (n / 100) ** (1 / 4)),\n as outlined in Schwert (1989). The p-values are interpolated from\n Table 1 of Kwiatkowski et al. (1992). If the computed statistic is\n outside the table of critical values, then a warning message is\n generated.\n\n Missing values are not handled.\n\n References\n ----------\n D. Kwiatkowski, P. C. B. Phillips, P. Schmidt, and Y. Shin (1992): Testing\n the Null Hypothesis of Stationarity against the Alternative of a Unit Root.\n `Journal of Econometrics` 54, 159-178.\n "
from warnings import warn
nobs = len(x)
x = np.asarray(x)
hypo = regression.lower()
if (nobs != x.size):
raise ValueError('x of shape {0} not understood'.format(x.shape))
if (hypo == 'ct'):
resids = OLS(x, add_constant(np.arange(1, (nobs + 1)))).fit().resid
crit = [0.119, 0.146, 0.176, 0.216]
elif (hypo == 'c'):
resids = (x - x.mean())
crit = [0.347, 0.463, 0.574, 0.739]
else:
raise ValueError("hypothesis '{0}' not understood".format(hypo))
if (lags is None):
lags = int(np.ceil((12.0 * np.power((nobs / 100.0), (1 / 4.0)))))
pvals = [0.1, 0.05, 0.025, 0.01]
eta = (sum((resids.cumsum() ** 2)) / (nobs ** 2))
s_hat = _sigma_est_kpss(resids, nobs, lags)
kpss_stat = (eta / s_hat)
p_value = np.interp(kpss_stat, crit, pvals)
if (p_value == pvals[(- 1)]):
warn('p-value is smaller than the indicated p-value', InterpolationWarning)
elif (p_value == pvals[0]):
warn('p-value is greater than the indicated p-value', InterpolationWarning)
crit_dict = {'10%': crit[0], '5%': crit[1], '2.5%': crit[2], '1%': crit[3]}
if store:
rstore = ResultsStore()
rstore.lags = lags
rstore.nobs = nobs
stationary_type = ('level' if (hypo == 'c') else 'trend')
rstore.H0 = 'The series is {0} stationary'.format(stationary_type)
rstore.HA = 'The series is not {0} stationary'.format(stationary_type)
return (kpss_stat, p_value, crit_dict, rstore)
else:
return (kpss_stat, p_value, lags, crit_dict)
| body_hash: -7,045,372,392,550,583,000 | docstring: Kwiatkowski-Phillips-Schmidt-Shin test for stationarity.
Computes the Kwiatkowski-Phillips-Schmidt-Shin (KPSS) test for the null
hypothesis that x is level or trend stationary.
Parameters
----------
x : array_like, 1d
Data series
regression : str{'c', 'ct'}
Indicates the null hypothesis for the KPSS test
* 'c' : The data is stationary around a constant (default)
* 'ct' : The data is stationary around a trend
lags : int
Indicates the number of lags to be used. If None (default),
lags is set to int(12 * (n / 100)**(1 / 4)), as outlined in
Schwert (1989).
store : bool
If True, then a result instance is returned additionally to
the KPSS statistic (default is False).
Returns
-------
kpss_stat : float
The KPSS test statistic
p_value : float
The p-value of the test. The p-value is interpolated from
Table 1 in Kwiatkowski et al. (1992), and a boundary point
is returned if the test statistic is outside the table of
critical values, that is, if the p-value is outside the
interval (0.01, 0.1).
lags : int
The truncation lag parameter
crit : dict
The critical values at 10%, 5%, 2.5% and 1%. Based on
Kwiatkowski et al. (1992).
resstore : (optional) instance of ResultStore
An instance of a dummy class with results attached as attributes
Notes
-----
To estimate sigma^2 the Newey-West estimator is used. If lags is None,
the truncation lag parameter is set to int(12 * (n / 100) ** (1 / 4)),
as outlined in Schwert (1989). The p-values are interpolated from
Table 1 of Kwiatkowski et al. (1992). If the computed statistic is
outside the table of critical values, then a warning message is
generated.
Missing values are not handled.
References
----------
D. Kwiatkowski, P. C. B. Phillips, P. Schmidt, and Y. Shin (1992): Testing
the Null Hypothesis of Stationarity against the Alternative of a Unit Root.
`Journal of Econometrics` 54, 159-178.
| path: statsmodels/tsa/stattools.py | name: kpss | repository_name: josef-pkt/statsmodels | lang: python |
body_without_docstring:
def kpss(x, regression='c', lags=None, store=False):
"\n Kwiatkowski-Phillips-Schmidt-Shin test for stationarity.\n\n Computes the Kwiatkowski-Phillips-Schmidt-Shin (KPSS) test for the null\n hypothesis that x is level or trend stationary.\n\n Parameters\n ----------\n x : array_like, 1d\n Data series\n regression : str{'c', 'ct'}\n Indicates the null hypothesis for the KPSS test\n * 'c' : The data is stationary around a constant (default)\n * 'ct' : The data is stationary around a trend\n lags : int\n Indicates the number of lags to be used. If None (default),\n lags is set to int(12 * (n / 100)**(1 / 4)), as outlined in\n Schwert (1989).\n store : bool\n If True, then a result instance is returned additionally to\n the KPSS statistic (default is False).\n\n Returns\n -------\n kpss_stat : float\n The KPSS test statistic\n p_value : float\n The p-value of the test. The p-value is interpolated from\n Table 1 in Kwiatkowski et al. (1992), and a boundary point\n is returned if the test statistic is outside the table of\n critical values, that is, if the p-value is outside the\n interval (0.01, 0.1).\n lags : int\n The truncation lag parameter\n crit : dict\n The critical values at 10%, 5%, 2.5% and 1%. Based on\n Kwiatkowski et al. (1992).\n resstore : (optional) instance of ResultStore\n An instance of a dummy class with results attached as attributes\n\n Notes\n -----\n To estimate sigma^2 the Newey-West estimator is used. If lags is None,\n the truncation lag parameter is set to int(12 * (n / 100) ** (1 / 4)),\n as outlined in Schwert (1989). The p-values are interpolated from\n Table 1 of Kwiatkowski et al. (1992). If the computed statistic is\n outside the table of critical values, then a warning message is\n generated.\n\n Missing values are not handled.\n\n References\n ----------\n D. Kwiatkowski, P. C. B. Phillips, P. Schmidt, and Y. Shin (1992): Testing\n the Null Hypothesis of Stationarity against the Alternative of a Unit Root.\n `Journal of Econometrics` 54, 159-178.\n "
from warnings import warn
nobs = len(x)
x = np.asarray(x)
hypo = regression.lower()
if (nobs != x.size):
raise ValueError('x of shape {0} not understood'.format(x.shape))
if (hypo == 'ct'):
resids = OLS(x, add_constant(np.arange(1, (nobs + 1)))).fit().resid
crit = [0.119, 0.146, 0.176, 0.216]
elif (hypo == 'c'):
resids = (x - x.mean())
crit = [0.347, 0.463, 0.574, 0.739]
else:
raise ValueError("hypothesis '{0}' not understood".format(hypo))
if (lags is None):
lags = int(np.ceil((12.0 * np.power((nobs / 100.0), (1 / 4.0)))))
pvals = [0.1, 0.05, 0.025, 0.01]
eta = (sum((resids.cumsum() ** 2)) / (nobs ** 2))
s_hat = _sigma_est_kpss(resids, nobs, lags)
kpss_stat = (eta / s_hat)
p_value = np.interp(kpss_stat, crit, pvals)
if (p_value == pvals[(- 1)]):
warn('p-value is smaller than the indicated p-value', InterpolationWarning)
elif (p_value == pvals[0]):
warn('p-value is greater than the indicated p-value', InterpolationWarning)
crit_dict = {'10%': crit[0], '5%': crit[1], '2.5%': crit[2], '1%': crit[3]}
if store:
rstore = ResultsStore()
rstore.lags = lags
rstore.nobs = nobs
stationary_type = ('level' if (hypo == 'c') else 'trend')
rstore.H0 = 'The series is {0} stationary'.format(stationary_type)
rstore.HA = 'The series is not {0} stationary'.format(stationary_type)
return (kpss_stat, p_value, crit_dict, rstore)
else:
return (kpss_stat, p_value, lags, crit_dict) |
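A minimal sketch of calling `kpss` as described in the record above; the series is synthetic, and the warning filter is only there because the interpolated p-value can hit the boundary of the tabulated critical values.

```python
import warnings
import numpy as np
from statsmodels.tsa.stattools import kpss

rng = np.random.RandomState(42)
series = rng.standard_normal(250)            # level-stationary by construction

with warnings.catch_warnings():
    warnings.simplefilter("ignore")          # silence InterpolationWarning at table edges
    stat, p_value, lags, crit = kpss(series, regression="c")

print(stat, p_value, lags, crit)             # large p-value: fail to reject stationarity
```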
def _sigma_est_kpss(resids, nobs, lags):
'\n Computes equation 10, p. 164 of Kwiatkowski et al. (1992). This is the\n consistent estimator for the variance.\n '
s_hat = sum((resids ** 2))
for i in range(1, (lags + 1)):
resids_prod = np.dot(resids[i:], resids[:(nobs - i)])
s_hat += ((2 * resids_prod) * (1.0 - (i / (lags + 1.0))))
return (s_hat / nobs)
| body_hash: -4,347,780,852,716,475,400 | docstring: Computes equation 10, p. 164 of Kwiatkowski et al. (1992). This is the consistent estimator for the variance. |
| path: statsmodels/tsa/stattools.py | name: _sigma_est_kpss | repository_name: josef-pkt/statsmodels | lang: python |
body_without_docstring:
def _sigma_est_kpss(resids, nobs, lags):
'\n Computes equation 10, p. 164 of Kwiatkowski et al. (1992). This is the\n consistent estimator for the variance.\n '
s_hat = sum((resids ** 2))
for i in range(1, (lags + 1)):
resids_prod = np.dot(resids[i:], resids[:(nobs - i)])
s_hat += ((2 * resids_prod) * (1.0 - (i / (lags + 1.0))))
return (s_hat / nobs) |
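The loop in `_sigma_est_kpss` implements a Bartlett-weighted long-run variance estimator; reading the weights directly from the code above, it computes

$$
\hat{\sigma}^2(l) = \frac{1}{T}\sum_{t=1}^{T} e_t^2
+ \frac{2}{T}\sum_{s=1}^{l}\Bigl(1 - \frac{s}{l+1}\Bigr)\sum_{t=s+1}^{T} e_t\, e_{t-s},
$$

where the $e_t$ are the residuals, $T$ is `nobs` and $l$ is `lags`.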
async def trigger_update(opp):
'Trigger a polling update by moving time forward.'
new_time = (dt.utcnow() + timedelta(seconds=(SCAN_INTERVAL + 1)))
async_fire_time_changed(opp, new_time)
(await opp.async_block_till_done())
| body_hash: -1,536,932,550,561,218,800 | docstring: Trigger a polling update by moving time forward. |
| path: tests/components/smarttub/__init__.py | name: trigger_update | repository_name: OpenPeerPower/core | lang: python |
body_without_docstring:
async def trigger_update(opp):
new_time = (dt.utcnow() + timedelta(seconds=(SCAN_INTERVAL + 1)))
async_fire_time_changed(opp, new_time)
(await opp.async_block_till_done()) |
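A hypothetical test sketch showing how a helper like `trigger_update` is typically awaited; the fixture names and the entity id are assumptions, not taken from the repository.

```python
async def test_entity_refresh(opp, setup_entry):
    # advance the clock past SCAN_INTERVAL so the integration polls again
    await trigger_update(opp)
    state = opp.states.get("sensor.my_spa_water_temperature")  # made-up entity id
    assert state is not None
```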
def __init__(self, dataclass_types: Union[(DataClassType, Iterable[DataClassType])], **kwargs):
'\n Args:\n dataclass_types:\n Dataclass type, or list of dataclass types for which we will "fill" instances with the parsed args.\n kwargs:\n (Optional) Passed to `argparse.ArgumentParser()` in the regular way.\n '
super().__init__(**kwargs)
if dataclasses.is_dataclass(dataclass_types):
dataclass_types = [dataclass_types]
self.dataclass_types = dataclass_types
for dtype in self.dataclass_types:
self._add_dataclass_arguments(dtype)
| body_hash: 5,540,399,526,315,942,000 | docstring: Args:
dataclass_types:
Dataclass type, or list of dataclass types for which we will "fill" instances with the parsed args.
kwargs:
(Optional) Passed to `argparse.ArgumentParser()` in the regular way.
| path: toolbox/KGArgsParser.py | name: __init__ | repository_name: LinXueyuanStdio/KGE-toolbox | lang: python |
body_without_docstring:
def __init__(self, dataclass_types: Union[(DataClassType, Iterable[DataClassType])], **kwargs):
'\n Args:\n dataclass_types:\n Dataclass type, or list of dataclass types for which we will "fill" instances with the parsed args.\n kwargs:\n (Optional) Passed to `argparse.ArgumentParser()` in the regular way.\n '
super().__init__(**kwargs)
if dataclasses.is_dataclass(dataclass_types):
dataclass_types = [dataclass_types]
self.dataclass_types = dataclass_types
for dtype in self.dataclass_types:
self._add_dataclass_arguments(dtype) |
def parse_args_into_dataclasses(self, args=None, return_remaining_strings=False, look_for_args_file=True, args_filename=None) -> Tuple[(DataClass, ...)]:
'\n Parse command-line args into instances of the specified dataclass types.\n\n This relies on argparse\'s `ArgumentParser.parse_known_args`. See the doc at:\n docs.python.org/3.7/library/argparse.html#argparse.ArgumentParser.parse_args\n\n Args:\n args:\n List of strings to parse. The default is taken from sys.argv. (same as argparse.ArgumentParser)\n return_remaining_strings:\n If true, also return a list of remaining argument strings.\n look_for_args_file:\n If true, will look for a ".args" file with the same base name as the entry point script for this\n process, and will append its potential content to the command line args.\n args_filename:\n If not None, will uses this file instead of the ".args" file specified in the previous argument.\n\n Returns:\n Tuple consisting of:\n\n - the dataclass instances in the same order as they were passed to the initializer.abspath\n - if applicable, an additional namespace for more (non-dataclass backed) arguments added to the parser\n after initialization.\n - The potential list of remaining argument strings. (same as argparse.ArgumentParser.parse_known_args)\n '
if (args_filename or (look_for_args_file and len(sys.argv))):
if args_filename:
args_file = Path(args_filename)
else:
args_file = Path(sys.argv[0]).with_suffix('.args')
if args_file.exists():
fargs = args_file.read_text().split()
args = ((fargs + args) if (args is not None) else (fargs + sys.argv[1:]))
(namespace, remaining_args) = self.parse_known_args(args=args)
outputs = []
for dtype in self.dataclass_types:
keys = {f.name for f in dataclasses.fields(dtype) if f.init}
inputs = {k: v for (k, v) in vars(namespace).items() if (k in keys)}
for k in keys:
delattr(namespace, k)
obj = dtype(**inputs)
outputs.append(obj)
if (len(namespace.__dict__) > 0):
outputs.append(namespace)
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(f'Some specified arguments are not used by the KGEArgParser: {remaining_args}')
return (*outputs,)
| body_hash: 7,657,435,898,646,354,000 | docstring: Parse command-line args into instances of the specified dataclass types.
This relies on argparse's `ArgumentParser.parse_known_args`. See the doc at:
docs.python.org/3.7/library/argparse.html#argparse.ArgumentParser.parse_args
Args:
args:
List of strings to parse. The default is taken from sys.argv. (same as argparse.ArgumentParser)
return_remaining_strings:
If true, also return a list of remaining argument strings.
look_for_args_file:
If true, will look for a ".args" file with the same base name as the entry point script for this
process, and will append its potential content to the command line args.
args_filename:
If not None, will uses this file instead of the ".args" file specified in the previous argument.
Returns:
Tuple consisting of:
- the dataclass instances in the same order as they were passed to the initializer.abspath
- if applicable, an additional namespace for more (non-dataclass backed) arguments added to the parser
after initialization.
- The potential list of remaining argument strings. (same as argparse.ArgumentParser.parse_known_args)
| path: toolbox/KGArgsParser.py | name: parse_args_into_dataclasses | repository_name: LinXueyuanStdio/KGE-toolbox | lang: python |
body_without_docstring:
def parse_args_into_dataclasses(self, args=None, return_remaining_strings=False, look_for_args_file=True, args_filename=None) -> Tuple[(DataClass, ...)]:
'\n Parse command-line args into instances of the specified dataclass types.\n\n This relies on argparse\'s `ArgumentParser.parse_known_args`. See the doc at:\n docs.python.org/3.7/library/argparse.html#argparse.ArgumentParser.parse_args\n\n Args:\n args:\n List of strings to parse. The default is taken from sys.argv. (same as argparse.ArgumentParser)\n return_remaining_strings:\n If true, also return a list of remaining argument strings.\n look_for_args_file:\n If true, will look for a ".args" file with the same base name as the entry point script for this\n process, and will append its potential content to the command line args.\n args_filename:\n If not None, will uses this file instead of the ".args" file specified in the previous argument.\n\n Returns:\n Tuple consisting of:\n\n - the dataclass instances in the same order as they were passed to the initializer.abspath\n - if applicable, an additional namespace for more (non-dataclass backed) arguments added to the parser\n after initialization.\n - The potential list of remaining argument strings. (same as argparse.ArgumentParser.parse_known_args)\n '
if (args_filename or (look_for_args_file and len(sys.argv))):
if args_filename:
args_file = Path(args_filename)
else:
args_file = Path(sys.argv[0]).with_suffix('.args')
if args_file.exists():
fargs = args_file.read_text().split()
args = ((fargs + args) if (args is not None) else (fargs + sys.argv[1:]))
(namespace, remaining_args) = self.parse_known_args(args=args)
outputs = []
for dtype in self.dataclass_types:
keys = {f.name for f in dataclasses.fields(dtype) if f.init}
inputs = {k: v for (k, v) in vars(namespace).items() if (k in keys)}
for k in keys:
delattr(namespace, k)
obj = dtype(**inputs)
outputs.append(obj)
if (len(namespace.__dict__) > 0):
outputs.append(namespace)
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(f'Some specified arguments are not used by the KGEArgParser: {remaining_args}')
return (*outputs,) |
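A sketch of driving the parser above from a dataclass. The class name `KGEArgParser` is taken from the error message in the method itself; the dataclass, its fields, and the explicit `args` list are invented for illustration, so treat this as a sketch rather than the repository's documented usage.

```python
from dataclasses import dataclass, field

@dataclass
class TrainArgs:
    dataset: str = field(default="FB15k-237")   # hypothetical fields
    lr: float = field(default=1e-3)
    epochs: int = field(default=100)

parser = KGEArgParser(TrainArgs)
(train_args,) = parser.parse_args_into_dataclasses(
    args=["--dataset", "WN18RR", "--lr", "0.0005"],
    look_for_args_file=False,
)
print(train_args.dataset, train_args.lr, train_args.epochs)
```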
def parse_json_file(self, json_file: str) -> Tuple[(DataClass, ...)]:
'\n Alternative helper method that does not use `argparse` at all, instead loading a json file and populating the\n dataclass types.\n '
data = json.loads(Path(json_file).read_text())
outputs = []
for dtype in self.dataclass_types:
keys = {f.name for f in dataclasses.fields(dtype) if f.init}
inputs = {k: v for (k, v) in data.items() if (k in keys)}
obj = dtype(**inputs)
outputs.append(obj)
return (*outputs,) | -4,033,736,629,704,605,700 | Alternative helper method that does not use `argparse` at all, instead loading a json file and populating the
dataclass types. | toolbox/KGArgsParser.py | parse_json_file | LinXueyuanStdio/KGE-toolbox | python | def parse_json_file(self, json_file: str) -> Tuple[(DataClass, ...)]:
'\n Alternative helper method that does not use `argparse` at all, instead loading a json file and populating the\n dataclass types.\n '
data = json.loads(Path(json_file).read_text())
outputs = []
for dtype in self.dataclass_types:
keys = {f.name for f in dataclasses.fields(dtype) if f.init}
inputs = {k: v for (k, v) in data.items() if (k in keys)}
obj = dtype(**inputs)
outputs.append(obj)
return (*outputs,) |
def parse_dict(self, args: dict) -> Tuple[(DataClass, ...)]:
'\n Alternative helper method that does not use `argparse` at all, instead uses a dict and populating the dataclass\n types.\n '
outputs = []
for dtype in self.dataclass_types:
keys = {f.name for f in dataclasses.fields(dtype) if f.init}
inputs = {k: v for (k, v) in args.items() if (k in keys)}
obj = dtype(**inputs)
outputs.append(obj)
return (*outputs,) | 3,798,765,331,785,445,000 | Alternative helper method that does not use `argparse` at all, instead uses a dict and populating the dataclass
types. | toolbox/KGArgsParser.py | parse_dict | LinXueyuanStdio/KGE-toolbox | python | def parse_dict(self, args: dict) -> Tuple[(DataClass, ...)]:
'\n Alternative helper method that does not use `argparse` at all, instead uses a dict and populating the dataclass\n types.\n '
outputs = []
for dtype in self.dataclass_types:
keys = {f.name for f in dataclasses.fields(dtype) if f.init}
inputs = {k: v for (k, v) in args.items() if (k in keys)}
obj = dtype(**inputs)
outputs.append(obj)
return (*outputs,) |
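Continuing the sketch above, the same dataclass can also be filled without touching `argv`, either from a JSON file via `parse_json_file` or from an in-memory dict; the values are again hypothetical.

```python
(train_args,) = parser.parse_dict({"dataset": "YAGO3-10", "lr": 5e-4})
# missing keys ("epochs" here) fall back to the dataclass defaults
```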
def __init__(self, pkg_dict: Dict[(str, Any)]):
'\n Class containing data that describes a package API\n\n :param pkg_dict: A dictionary representation of a\n software package, complying with the output format of\n doppel-describe.\n\n '
self._validate_pkg(pkg_dict)
self.pkg_dict = pkg_dict | -6,044,739,435,903,076,000 | Class containing data that describes a package API
:param pkg_dict: A dictionary representation of a
software package, complying with the output format of
doppel-describe. | doppel/PackageAPI.py | __init__ | franklinen/doppel-cli | python | def __init__(self, pkg_dict: Dict[(str, Any)]):
'\n Class containing data that describes a package API\n\n :param pkg_dict: A dictionary representation of a\n software package, complying with the output format of\n doppel-describe.\n\n '
self._validate_pkg(pkg_dict)
self.pkg_dict = pkg_dict |
@classmethod
def from_json(cls, filename: str) -> 'PackageAPI':
"\n Instantiate a Package object from a file.\n\n :param filename: Name of the JSON file\n that contains the description of the\n target package's API.\n\n "
_log_info(f'Creating package from {filename}')
with open(filename, 'r') as f:
pkg_dict = json.loads(f.read())
return cls(pkg_dict)
| body_hash: -6,133,293,852,267,853,000 | docstring: Instantiate a Package object from a file.
:param filename: Name of the JSON file
that contains the description of the
target package's API.
| path: doppel/PackageAPI.py | name: from_json | repository_name: franklinen/doppel-cli | lang: python |
body_without_docstring:
@classmethod
def from_json(cls, filename: str) -> 'PackageAPI':
"\n Instantiate a Package object from a file.\n\n :param filename: Name of the JSON file\n that contains the description of the\n target package's API.\n\n "
_log_info(f'Creating package from {filename}')
with open(filename, 'r') as f:
pkg_dict = json.loads(f.read())
return cls(pkg_dict) |
def name(self) -> str:
'\n Get the name of the package.\n '
return self.pkg_dict['name'] | -4,031,569,820,273,227,000 | Get the name of the package. | doppel/PackageAPI.py | name | franklinen/doppel-cli | python | def name(self) -> str:
'\n \n '
return self.pkg_dict['name'] |
def num_functions(self) -> int:
'\n Get the number of exported functions in the package.\n '
return len(self.function_names()) | 358,728,798,804,206,400 | Get the number of exported functions in the package. | doppel/PackageAPI.py | num_functions | franklinen/doppel-cli | python | def num_functions(self) -> int:
'\n \n '
return len(self.function_names()) |
def function_names(self) -> List[str]:
'\n Get a list with the names of all exported functions\n in the package.\n '
return sorted(list(self.pkg_dict['functions'].keys())) | 3,015,912,207,251,559,000 | Get a list with the names of all exported functions
in the package. | doppel/PackageAPI.py | function_names | franklinen/doppel-cli | python | def function_names(self) -> List[str]:
'\n Get a list with the names of all exported functions\n in the package.\n '
return sorted(list(self.pkg_dict['functions'].keys())) |
def functions_with_args(self) -> Dict[(str, Dict[(str, Any)])]:
'\n Get a dictionary with all exported functions in the package\n and some details describing them.\n '
return self.pkg_dict['functions'] | -5,598,182,101,801,267,000 | Get a dictionary with all exported functions in the package
and some details describing them. | doppel/PackageAPI.py | functions_with_args | franklinen/doppel-cli | python | def functions_with_args(self) -> Dict[(str, Dict[(str, Any)])]:
'\n Get a dictionary with all exported functions in the package\n and some details describing them.\n '
return self.pkg_dict['functions'] |
def num_classes(self) -> int:
'\n Get the number of exported classes in the package.\n '
return len(self.class_names()) | -5,579,227,444,031,814,000 | Get the number of exported classes in the package. | doppel/PackageAPI.py | num_classes | franklinen/doppel-cli | python | def num_classes(self) -> int:
'\n \n '
return len(self.class_names()) |
def class_names(self) -> List[str]:
'\n Get a list with the names of all exported classes\n in the package.\n '
return sorted(list(self.pkg_dict['classes'].keys())) | -6,672,906,188,787,706,000 | Get a list with the names of all exported classes
in the package. | doppel/PackageAPI.py | class_names | franklinen/doppel-cli | python | def class_names(self) -> List[str]:
'\n Get a list with the names of all exported classes\n in the package.\n '
return sorted(list(self.pkg_dict['classes'].keys())) |
def public_methods(self, class_name: str) -> List[str]:
'\n Get a list with the names of all public methods for a class.\n\n :param class_name: Name of a class in the package\n '
return sorted(list(self.pkg_dict['classes'][class_name]['public_methods'].keys())) | 1,912,717,380,565,422,000 | Get a list with the names of all public methods for a class.
:param class_name: Name of a class in the package | doppel/PackageAPI.py | public_methods | franklinen/doppel-cli | python | def public_methods(self, class_name: str) -> List[str]:
'\n Get a list with the names of all public methods for a class.\n\n :param class_name: Name of a class in the package\n '
return sorted(list(self.pkg_dict['classes'][class_name]['public_methods'].keys())) |
def public_method_args(self, class_name: str, method_name: str) -> List[str]:
'\n Get a list of arguments for a public method from a class.\n\n :param class_name: Name of a class in the package\n :param method-name: Name of the method to get arguments for\n '
return list(self.pkg_dict['classes'][class_name]['public_methods'][method_name]['args']) | 8,502,998,262,803,455,000 | Get a list of arguments for a public method from a class.
:param class_name: Name of a class in the package
:param method-name: Name of the method to get arguments for | doppel/PackageAPI.py | public_method_args | franklinen/doppel-cli | python | def public_method_args(self, class_name: str, method_name: str) -> List[str]:
'\n Get a list of arguments for a public method from a class.\n\n :param class_name: Name of a class in the package\n :param method-name: Name of the method to get arguments for\n '
return list(self.pkg_dict['classes'][class_name]['public_methods'][method_name]['args']) |
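A hedged end-to-end example of the `PackageAPI` accessors documented in the records above; the JSON file name and its contents are invented and would normally come from doppel-describe.

```python
api = PackageAPI.from_json("mypkg_python.json")   # hypothetical doppel-describe output
print(api.name(), api.num_functions(), api.num_classes())

for cls_name in api.class_names():
    for method in api.public_methods(cls_name):
        print(cls_name, method, api.public_method_args(cls_name, method))
```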
@staticmethod
def get_placeholder(page, slot=None):
'\n Returns the named placeholder or, if no «slot» provided, the first\n editable, non-static placeholder or None.\n '
placeholders = page.get_placeholders()
if slot:
placeholders = placeholders.filter(slot=slot)
for ph in placeholders:
if ((not ph.is_static) and ph.is_editable):
return ph
return None
| body_hash: 1,405,203,255,283,725,800 | docstring: Returns the named placeholder or, if no «slot» provided, the first editable, non-static placeholder or None. |
| path: cms/forms/wizards.py | name: get_placeholder | repository_name: rspeed/django-cms-contrib | lang: python |
body_without_docstring:
@staticmethod
def get_placeholder(page, slot=None):
'\n Returns the named placeholder or, if no «slot» provided, the first\n editable, non-static placeholder or None.\n '
placeholders = page.get_placeholders()
if slot:
placeholders = placeholders.filter(slot=slot)
for ph in placeholders:
if ((not ph.is_static) and ph.is_editable):
return ph
return None |
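A hypothetical call sketch for the static helper above: prefer a named slot when it is editable, otherwise fall back to the first editable, non-static placeholder. The owning class name `CreateCMSPageForm` and the slot name "content" are assumptions.

```python
placeholder = (
    CreateCMSPageForm.get_placeholder(page, slot="content")
    or CreateCMSPageForm.get_placeholder(page)
)
```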
def clean(self):
'\n Validates that either the slug is provided, or that slugification from\n `title` produces a valid slug.\n :return:\n '
cleaned_data = super(CreateCMSPageForm, self).clean()
slug = cleaned_data.get('slug')
sub_page = cleaned_data.get('sub_page')
title = cleaned_data.get('title')
if self.page:
if sub_page:
parent = self.page
else:
parent = self.page.parent
else:
parent = None
if slug:
starting_point = slug
elif title:
starting_point = title
else:
starting_point = _('page')
slug = generate_valid_slug(starting_point, parent, self.language_code)
if (not slug):
raise forms.ValidationError('Please provide a valid slug.')
cleaned_data['slug'] = slug
return cleaned_data
| body_hash: 13,318,186,756,332,642 | docstring: Validates that either the slug is provided, or that slugification from
`title` produces a valid slug.
:return:
| path: cms/forms/wizards.py | name: clean | repository_name: rspeed/django-cms-contrib | lang: python |
body_without_docstring:
def clean(self):
'\n Validates that either the slug is provided, or that slugification from\n `title` produces a valid slug.\n :return:\n '
cleaned_data = super(CreateCMSPageForm, self).clean()
slug = cleaned_data.get('slug')
sub_page = cleaned_data.get('sub_page')
title = cleaned_data.get('title')
if self.page:
if sub_page:
parent = self.page
else:
parent = self.page.parent
else:
parent = None
if slug:
starting_point = slug
elif title:
starting_point = title
else:
starting_point = _('page')
slug = generate_valid_slug(starting_point, parent, self.language_code)
if (not slug):
raise forms.ValidationError('Please provide a valid slug.')
cleaned_data['slug'] = slug
return cleaned_data |
def __init__(self, application, hostname, key):
"\n\t\t:param application: The application to associate this popup dialog with.\n\t\t:type application: :py:class:`.KingPhisherClientApplication`\n\t\t:param str hostname: The hostname associated with the key.\n\t\t:param key: The host's SSH key.\n\t\t:type key: :py:class:`paramiko.pkey.PKey`\n\t\t"
super(BaseHostKeyDialog, self).__init__(application)
self.hostname = hostname
self.key = key
textview = self.gobjects['textview_key_details']
textview.modify_font(Pango.FontDescription('monospace 9'))
textview.get_buffer().set_text(self.key_details)
if (self.default_response is not None):
button = self.dialog.get_widget_for_response(response_id=self.default_response)
button.grab_default() | -28,344,514,441,261,160 | :param application: The application to associate this popup dialog with.
:type application: :py:class:`.KingPhisherClientApplication`
:param str hostname: The hostname associated with the key.
:param key: The host's SSH key.
:type key: :py:class:`paramiko.pkey.PKey` | king_phisher/client/dialogs/ssh_host_key.py | __init__ | tanc7/king-phisher | python | def __init__(self, application, hostname, key):
"\n\t\t:param application: The application to associate this popup dialog with.\n\t\t:type application: :py:class:`.KingPhisherClientApplication`\n\t\t:param str hostname: The hostname associated with the key.\n\t\t:param key: The host's SSH key.\n\t\t:type key: :py:class:`paramiko.pkey.PKey`\n\t\t"
super(BaseHostKeyDialog, self).__init__(application)
self.hostname = hostname
self.key = key
textview = self.gobjects['textview_key_details']
textview.modify_font(Pango.FontDescription('monospace 9'))
textview.get_buffer().set_text(self.key_details)
if (self.default_response is not None):
button = self.dialog.get_widget_for_response(response_id=self.default_response)
button.grab_default() |
def __init__(self, application):
'\n\t\t:param application: The application which is using this policy.\n\t\t:type application: :py:class:`.KingPhisherClientApplication`\n\t\t'
self.application = application
self.logger = logging.getLogger(('KingPhisher.Client.' + self.__class__.__name__))
super(MissingHostKeyPolicy, self).__init__() | -4,761,189,396,857,635,000 | :param application: The application which is using this policy.
:type application: :py:class:`.KingPhisherClientApplication` | king_phisher/client/dialogs/ssh_host_key.py | __init__ | tanc7/king-phisher | python | def __init__(self, application):
'\n\t\t:param application: The application which is using this policy.\n\t\t:type application: :py:class:`.KingPhisherClientApplication`\n\t\t'
self.application = application
self.logger = logging.getLogger(('KingPhisher.Client.' + self.__class__.__name__))
super(MissingHostKeyPolicy, self).__init__() |
def generate_bubblesort(prefix, num_examples, debug=False, maximum=10000000000, debug_every=1000):
"\n Generates addition data with the given string prefix (i.e. 'train', 'test') and the specified\n number of examples.\n\n :param prefix: String prefix for saving the file ('train', 'test')\n :param num_examples: Number of examples to generate.\n "
data = []
for i in range(num_examples):
array = np.random.randint(10, size=5)
if (debug and ((i % debug_every) == 0)):
traces = Trace(array, True).traces
else:
traces = Trace(array).traces
data.append((array, traces))
with open('tasks/bubblesort/data/{}.pik'.format(prefix), 'wb') as f:
pickle.dump(data, f) | -4,308,264,678,087,419,000 | Generates addition data with the given string prefix (i.e. 'train', 'test') and the specified
number of examples.
:param prefix: String prefix for saving the file ('train', 'test')
:param num_examples: Number of examples to generate. | tasks/bubblesort/env/generate_data.py | generate_bubblesort | ford-core-ai/neural-programming-architectures | python | def generate_bubblesort(prefix, num_examples, debug=False, maximum=10000000000, debug_every=1000):
"\n Generates addition data with the given string prefix (i.e. 'train', 'test') and the specified\n number of examples.\n\n :param prefix: String prefix for saving the file ('train', 'test')\n :param num_examples: Number of examples to generate.\n "
data = []
for i in range(num_examples):
array = np.random.randint(10, size=5)
if (debug and ((i % debug_every) == 0)):
traces = Trace(array, True).traces
else:
traces = Trace(array).traces
data.append((array, traces))
with open('tasks/bubblesort/data/{}.pik'.format(prefix), 'wb') as f:
pickle.dump(data, f) |
def check():
' check that all paths are properly defined'
checked = True
print(f' - history tar files will be mounted on: {dirmounted_root}')
print(f' - ratarmount executable is in : {ratarmount}') | 7,019,680,802,621,906,000 | check that all paths are properly defined | paragridded/giga_tools.py | check | Mesharou/paragridded | python | def check():
' '
checked = True
print(f' - history tar files will be mounted on: {dirmounted_root}')
print(f' - ratarmount executable is in : {ratarmount}') |
def get_subdmap(directory):
'Reconstruct how netCDF files are stored in fused directory\n\n directory == dirgrid | dirhis '
_subdmap = {}
for subd in subdomains:
fs = glob.glob((directory.format(subd=subd) + '/*.nc'))
tiles = [int(f.split('.')[(- 2)]) for f in fs]
for t in tiles:
_subdmap[t] = subd
return _subdmap | 5,205,752,084,815,372,000 | Reconstruct how netCDF files are stored in fused directory
directory == dirgrid | dirhis | paragridded/giga_tools.py | get_subdmap | Mesharou/paragridded | python | def get_subdmap(directory):
'Reconstruct how netCDF files are stored in fused directory\n\n directory == dirgrid | dirhis '
_subdmap = {}
for subd in subdomains:
fs = glob.glob((directory.format(subd=subd) + '/*.nc'))
tiles = [int(f.split('.')[(- 2)]) for f in fs]
for t in tiles:
_subdmap[t] = subd
return _subdmap |
def mount_tar(source, tarfile, destdir):
'\n source: str, directory of the tar files\n template: str, template name for the tar file containing "{subd"\n subd: int, index of the subdomain (0<=subd<=13)\n destdir: str, directory where to archivemount\n\n '
srcfile = f'{source}/{tarfile}'
assert os.path.isfile(srcfile), f'{srcfile} does not exist'
sqlitefile = get_sqlitefilename(srcfile)
home = os.path.expanduser('~')
ratardirsqlite = f'{home}/.ratarmount'
if os.path.isfile(f'{ratardirsqlite}/{sqlitefile}'):
pass
elif os.path.isfile(f'{sqlitesdir}/{sqlitefile}'):
command = f'cp {sqlitesdir}/{sqlitefile} {ratardirsqlite}/'
os.system(command)
assert (len(ratarmount) > 0), BB('You forgot to set the ratarmount path')
command = f'{ratarmount} {srcfile} {destdir}'
os.system(command)
if os.path.isfile(f'{sqlitesdir}/{sqlitefile}'):
pass
else:
command = f'cp {ratardirsqlite}/{sqlitefile} {sqlitesdir}/'
os.system(command) | 4,785,231,440,578,954,000 | source: str, directory of the tar files
template: str, template name for the tar file containing "{subd"
subd: int, index of the subdomain (0<=subd<=13)
destdir: str, directory where to archivemount | paragridded/giga_tools.py | mount_tar | Mesharou/paragridded | python | def mount_tar(source, tarfile, destdir):
'\n source: str, directory of the tar files\n template: str, template name for the tar file containing "{subd"\n subd: int, index of the subdomain (0<=subd<=13)\n destdir: str, directory where to archivemount\n\n '
srcfile = f'{source}/{tarfile}'
assert os.path.isfile(srcfile), f'{srcfile} does not exist'
sqlitefile = get_sqlitefilename(srcfile)
home = os.path.expanduser('~')
ratardirsqlite = f'{home}/.ratarmount'
if os.path.isfile(f'{ratardirsqlite}/{sqlitefile}'):
pass
elif os.path.isfile(f'{sqlitesdir}/{sqlitefile}'):
command = f'cp {sqlitesdir}/{sqlitefile} {ratardirsqlite}/'
os.system(command)
assert (len(ratarmount) > 0), BB('You forgot to set the ratarmount path')
command = f'{ratarmount} {srcfile} {destdir}'
os.system(command)
if os.path.isfile(f'{sqlitesdir}/{sqlitefile}'):
pass
else:
command = f'cp {ratardirsqlite}/{sqlitefile} {sqlitesdir}/'
os.system(command) |
def mount(subd, grid=False, overwrite=True):
'Mount tar file `subd`'
if grid:
destdir = dirgrid.format(subd=subd)
srcdir = dirgridtar.format(subd=subd)
tarfile = targridtemplate.format(subd=subd)
else:
destdir = dirhis.format(subd=subd)
srcdir = dirgigaref.format(subd=subd)
tarfile = tarhistemplate.format(hisdate=hisdate, subd=subd)
tomount = True
if os.path.exists(destdir):
if (len(os.listdir(destdir)) == 0):
pass
elif overwrite:
command = f'fusermount -u {destdir}'
try:
os.system(command)
except:
pass
assert (len(os.listdir(f'{destdir}')) == 0)
else:
tomount = False
else:
print(f'*** makedir {destdir}')
if tomount:
mount_tar(srcdir, tarfile, destdir)
if (not grid):
write_toc(destdir, subd, hisdate) | -7,433,501,504,231,536,000 | Mount tar file `subd` | paragridded/giga_tools.py | mount | Mesharou/paragridded | python | def mount(subd, grid=False, overwrite=True):
if grid:
destdir = dirgrid.format(subd=subd)
srcdir = dirgridtar.format(subd=subd)
tarfile = targridtemplate.format(subd=subd)
else:
destdir = dirhis.format(subd=subd)
srcdir = dirgigaref.format(subd=subd)
tarfile = tarhistemplate.format(hisdate=hisdate, subd=subd)
tomount = True
if os.path.exists(destdir):
if (len(os.listdir(destdir)) == 0):
pass
elif overwrite:
command = f'fusermount -u {destdir}'
try:
os.system(command)
except:
pass
assert (len(os.listdir(f'{destdir}')) == 0)
else:
tomount = False
else:
print(f'*** makedir {destdir}')
if tomount:
mount_tar(srcdir, tarfile, destdir)
if (not grid):
write_toc(destdir, subd, hisdate) |
def mount_stats(grid=False):
' Print statistics on mounted tar files'
print(('-' * 40))
print(BB('statistics on mounted tar files'))
print(f'mounting point: {dirmounted}')
for subd in subdomains:
if grid:
destdir = dirgrid.format(subd=subd)
else:
destdir = dirhis.format(subd=subd)
if os.path.exists(destdir):
filelist = os.listdir(f'{destdir}')
nbfiles = len(filelist)
if (nbfiles > 0):
tiles = set([int(f.split('.')[(- 2)]) for f in filelist])
nbtiles = len(tiles)
tile = list(tiles)[0]
fs = [f for f in filelist if (f'{tile:04}.nc' in f)]
if grid:
msg = f' - {subd:02} : {nbtiles:03} tiles'
else:
_hisdate = read_toc(destdir, subd)
bbhisdate = BB(_hisdate)
msg = f' - {subd:02} : {bbhisdate} with {nbtiles:03} tiles'
else:
msg = f' - {subd:02} : empty'
else:
warning = BB('destroyed')
msg = f' - {subd:02} : {warning}'
print(msg) | 8,784,130,287,067,626,000 | Print statistics on mounted tar files | paragridded/giga_tools.py | mount_stats | Mesharou/paragridded | python | def mount_stats(grid=False):
' '
print(('-' * 40))
print(BB('statistics on mounted tar files'))
print(f'mounting point: {dirmounted}')
for subd in subdomains:
if grid:
destdir = dirgrid.format(subd=subd)
else:
destdir = dirhis.format(subd=subd)
if os.path.exists(destdir):
filelist = os.listdir(f'{destdir}')
nbfiles = len(filelist)
if (nbfiles > 0):
tiles = set([int(f.split('.')[(- 2)]) for f in filelist])
nbtiles = len(tiles)
tile = list(tiles)[0]
fs = [f for f in filelist if (f'{tile:04}.nc' in f)]
if grid:
msg = f' - {subd:02} : {nbtiles:03} tiles'
else:
_hisdate = read_toc(destdir, subd)
bbhisdate = BB(_hisdate)
msg = f' - {subd:02} : {bbhisdate} with {nbtiles:03} tiles'
else:
msg = f' - {subd:02} : empty'
else:
warning = BB('destroyed')
msg = f' - {subd:02} : {warning}'
print(msg) |
def umount(subd, grid=False):
' Unmount `subd` tar archive folder\n\n The command to unmount a fuse folder is fusermount -u'
if grid:
destdir = dirgrid.format(subd=subd)
else:
destdir = dirhis.format(subd=subd)
if (os.path.isdir(destdir) and (len(os.listdir(f'{destdir}')) != 0)):
command = f'fusermount -u {destdir}'
os.system(command)
else:
pass | -2,445,494,873,886,492,700 | Unmount `subd` tar archive folder
The command to unmount a fuse folder is fusermount -u | paragridded/giga_tools.py | umount | Mesharou/paragridded | python | def umount(subd, grid=False):
' Unmount `subd` tar archive folder\n\n The command to unmount a fuse folder is fusermount -u'
if grid:
destdir = dirgrid.format(subd=subd)
else:
destdir = dirhis.format(subd=subd)
if (os.path.isdir(destdir) and (len(os.listdir(f'{destdir}')) != 0)):
command = f'fusermount -u {destdir}'
os.system(command)
else:
pass |
def LLTP2domain(lowerleft, topright):
'Convert the two pairs of (lower, left), (top, right) in (lat, lon)\n into the four pairs of (lat, lon) of the corners '
(xa, ya) = lowerleft
(xb, yb) = topright
domain = [(xa, ya), (xa, yb), (xb, yb), (xb, ya)]
return domain | -7,237,054,415,005,802,000 | Convert the two pairs of (lower, left), (top, right) in (lat, lon)
into the four pairs of (lat, lon) of the corners | paragridded/giga_tools.py | LLTP2domain | Mesharou/paragridded | python | def LLTP2domain(lowerleft, topright):
'Convert the two pairs of (lower, left), (top, right) in (lat, lon)\n into the four pairs of (lat, lon) of the corners '
(xa, ya) = lowerleft
(xb, yb) = topright
domain = [(xa, ya), (xa, yb), (xb, yb), (xb, ya)]
return domain |
def find_tiles_inside(domain, corners):
'Determine which tiles are inside `domain`\n\n The function uses `corners` the list of corners for each tile\n '
p = Polygon(domain)
tileslist = []
for (tile, c) in corners.items():
q = Polygon(c)
if (p.overlaps(q) or p.contains(q)):
tileslist += [tile]
return tileslist | -2,155,202,173,137,227,300 | Determine which tiles are inside `domain`
The function uses `corners` the list of corners for each tile | paragridded/giga_tools.py | find_tiles_inside | Mesharou/paragridded | python | def find_tiles_inside(domain, corners):
'Determine which tiles are inside `domain`\n\n The function uses `corners` the list of corners for each tile\n '
p = Polygon(domain)
tileslist = []
for (tile, c) in corners.items():
q = Polygon(c)
if (p.overlaps(q) or p.contains(q)):
tileslist += [tile]
return tileslist |
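Taken together, `LLTP2domain` and `find_tiles_inside` turn a bounding box into the list of tiles it touches. A small sketch with made-up tile corners, assuming the two functions above and shapely are available (tile 0 lies inside the box, tile 1 straddles its edge, tile 2 is far away):

# Hypothetical 1x1-degree tile corners, keyed by tile index.
corners = {
    0: [(-10.0, 30.0), (-10.0, 31.0), (-9.0, 31.0), (-9.0, 30.0)],
    1: [(-9.0, 30.0), (-9.0, 31.0), (-8.0, 31.0), (-8.0, 30.0)],
    2: [(0.0, 30.0), (0.0, 31.0), (1.0, 31.0), (1.0, 30.0)],
}

# Bounding box given as (lower, left) and (top, right) pairs.
domain = LLTP2domain((-10.5, 29.5), (-8.5, 31.5))

print(find_tiles_inside(domain, corners))  # [0, 1]: tile 0 is contained, tile 1 overlaps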
def get_dates():
'\n Scan dirgiga for *tar files\n '
subd = 1
pattern = f'{dirgigaref}/*.{subd:02}.tar'.format(subd=subd)
files = glob.glob(pattern)
_dates_tar = [f.split('/')[(- 1)].split('.')[(- 3)] for f in files]
return sorted(_dates_tar) | 2,012,071,602,746,254,300 | Scan dirgiga for *tar files | paragridded/giga_tools.py | get_dates | Mesharou/paragridded | python | def get_dates():
'\n \n '
subd = 1
pattern = f'{dirgigaref}/*.{subd:02}.tar'.format(subd=subd)
files = glob.glob(pattern)
_dates_tar = [f.split('/')[(- 1)].split('.')[(- 3)] for f in files]
return sorted(_dates_tar) |
def set_default_time_zone(time_zone: dt.tzinfo) -> None:
'Set a default time zone to be used when none is specified.\n\n Async friendly.\n '
global DEFAULT_TIME_ZONE
assert isinstance(time_zone, dt.tzinfo)
DEFAULT_TIME_ZONE = time_zone | 8,305,351,147,355,129,000 | Set a default time zone to be used when none is specified.
Async friendly. | homeassistant/util/dt.py | set_default_time_zone | 854562/home-assistant | python | def set_default_time_zone(time_zone: dt.tzinfo) -> None:
'Set a default time zone to be used when none is specified.\n\n Async friendly.\n '
global DEFAULT_TIME_ZONE
assert isinstance(time_zone, dt.tzinfo)
DEFAULT_TIME_ZONE = time_zone |
def get_time_zone(time_zone_str: str) -> Optional[dt.tzinfo]:
'Get time zone from string. Return None if unable to determine.\n\n Async friendly.\n '
try:
return pytz.timezone(time_zone_str)
except pytzexceptions.UnknownTimeZoneError:
return None | 808,354,402,533,898,000 | Get time zone from string. Return None if unable to determine.
Async friendly. | homeassistant/util/dt.py | get_time_zone | 854562/home-assistant | python | def get_time_zone(time_zone_str: str) -> Optional[dt.tzinfo]:
'Get time zone from string. Return None if unable to determine.\n\n Async friendly.\n '
try:
return pytz.timezone(time_zone_str)
except pytzexceptions.UnknownTimeZoneError:
return None |
def utcnow() -> dt.datetime:
'Get now in UTC time.'
return dt.datetime.now(UTC) | -7,757,326,031,541,859,000 | Get now in UTC time. | homeassistant/util/dt.py | utcnow | 854562/home-assistant | python | def utcnow() -> dt.datetime:
return dt.datetime.now(UTC) |
def now(time_zone: Optional[dt.tzinfo]=None) -> dt.datetime:
'Get now in specified time zone.'
return dt.datetime.now((time_zone or DEFAULT_TIME_ZONE)) | -7,334,469,809,376,690,000 | Get now in specified time zone. | homeassistant/util/dt.py | now | 854562/home-assistant | python | def now(time_zone: Optional[dt.tzinfo]=None) -> dt.datetime:
return dt.datetime.now((time_zone or DEFAULT_TIME_ZONE)) |
def as_utc(dattim: dt.datetime) -> dt.datetime:
'Return a datetime as UTC time.\n\n Assumes datetime without tzinfo to be in the DEFAULT_TIME_ZONE.\n '
if (dattim.tzinfo == UTC):
return dattim
if (dattim.tzinfo is None):
dattim = DEFAULT_TIME_ZONE.localize(dattim)
return dattim.astimezone(UTC) | -256,635,588,040,750,370 | Return a datetime as UTC time.
Assumes datetime without tzinfo to be in the DEFAULT_TIME_ZONE. | homeassistant/util/dt.py | as_utc | 854562/home-assistant | python | def as_utc(dattim: dt.datetime) -> dt.datetime:
'Return a datetime as UTC time.\n\n Assumes datetime without tzinfo to be in the DEFAULT_TIME_ZONE.\n '
if (dattim.tzinfo == UTC):
return dattim
if (dattim.tzinfo is None):
dattim = DEFAULT_TIME_ZONE.localize(dattim)
return dattim.astimezone(UTC) |
def as_timestamp(dt_value: dt.datetime) -> float:
'Convert a date/time into a unix time (seconds since 1970).'
if hasattr(dt_value, 'timestamp'):
parsed_dt: Optional[dt.datetime] = dt_value
else:
parsed_dt = parse_datetime(str(dt_value))
if (parsed_dt is None):
raise ValueError('not a valid date/time.')
return parsed_dt.timestamp() | 7,903,070,737,980,607,000 | Convert a date/time into a unix time (seconds since 1970). | homeassistant/util/dt.py | as_timestamp | 854562/home-assistant | python | def as_timestamp(dt_value: dt.datetime) -> float:
if hasattr(dt_value, 'timestamp'):
parsed_dt: Optional[dt.datetime] = dt_value
else:
parsed_dt = parse_datetime(str(dt_value))
if (parsed_dt is None):
raise ValueError('not a valid date/time.')
return parsed_dt.timestamp() |
def as_local(dattim: dt.datetime) -> dt.datetime:
'Convert a UTC datetime object to local time zone.'
if (dattim.tzinfo == DEFAULT_TIME_ZONE):
return dattim
if (dattim.tzinfo is None):
dattim = UTC.localize(dattim)
return dattim.astimezone(DEFAULT_TIME_ZONE) | 2,996,560,705,096,557,600 | Convert a UTC datetime object to local time zone. | homeassistant/util/dt.py | as_local | 854562/home-assistant | python | def as_local(dattim: dt.datetime) -> dt.datetime:
if (dattim.tzinfo == DEFAULT_TIME_ZONE):
return dattim
if (dattim.tzinfo is None):
dattim = UTC.localize(dattim)
return dattim.astimezone(DEFAULT_TIME_ZONE) |
def utc_from_timestamp(timestamp: float) -> dt.datetime:
'Return a UTC time from a timestamp.'
return UTC.localize(dt.datetime.utcfromtimestamp(timestamp)) | -6,724,019,066,667,065,000 | Return a UTC time from a timestamp. | homeassistant/util/dt.py | utc_from_timestamp | 854562/home-assistant | python | def utc_from_timestamp(timestamp: float) -> dt.datetime:
return UTC.localize(dt.datetime.utcfromtimestamp(timestamp)) |
def start_of_local_day(dt_or_d: Union[(dt.date, dt.datetime, None)]=None) -> dt.datetime:
'Return local datetime object of start of day from date or datetime.'
if (dt_or_d is None):
date: dt.date = now().date()
elif isinstance(dt_or_d, dt.datetime):
date = dt_or_d.date()
return DEFAULT_TIME_ZONE.localize(dt.datetime.combine(date, dt.time())) | -5,787,161,904,655,488,000 | Return local datetime object of start of day from date or datetime. | homeassistant/util/dt.py | start_of_local_day | 854562/home-assistant | python | def start_of_local_day(dt_or_d: Union[(dt.date, dt.datetime, None)]=None) -> dt.datetime:
if (dt_or_d is None):
date: dt.date = now().date()
elif isinstance(dt_or_d, dt.datetime):
date = dt_or_d.date()
return DEFAULT_TIME_ZONE.localize(dt.datetime.combine(date, dt.time())) |
def parse_datetime(dt_str: str) -> Optional[dt.datetime]:
"Parse a string and return a datetime.datetime.\n\n This function supports time zone offsets. When the input contains one,\n the output uses a timezone with a fixed offset from UTC.\n Raises ValueError if the input is well formatted but not a valid datetime.\n Returns None if the input isn't well formatted.\n "
match = DATETIME_RE.match(dt_str)
if (not match):
return None
kws: Dict[(str, Any)] = match.groupdict()
if kws['microsecond']:
kws['microsecond'] = kws['microsecond'].ljust(6, '0')
tzinfo_str = kws.pop('tzinfo')
tzinfo: Optional[dt.tzinfo] = None
if (tzinfo_str == 'Z'):
tzinfo = UTC
elif (tzinfo_str is not None):
offset_mins = (int(tzinfo_str[(- 2):]) if (len(tzinfo_str) > 3) else 0)
offset_hours = int(tzinfo_str[1:3])
offset = dt.timedelta(hours=offset_hours, minutes=offset_mins)
if (tzinfo_str[0] == '-'):
offset = (- offset)
tzinfo = dt.timezone(offset)
kws = {k: int(v) for (k, v) in kws.items() if (v is not None)}
kws['tzinfo'] = tzinfo
return dt.datetime(**kws) | -1,937,966,146,818,874,600 | Parse a string and return a datetime.datetime.
This function supports time zone offsets. When the input contains one,
the output uses a timezone with a fixed offset from UTC.
Raises ValueError if the input is well formatted but not a valid datetime.
Returns None if the input isn't well formatted. | homeassistant/util/dt.py | parse_datetime | 854562/home-assistant | python | def parse_datetime(dt_str: str) -> Optional[dt.datetime]:
"Parse a string and return a datetime.datetime.\n\n This function supports time zone offsets. When the input contains one,\n the output uses a timezone with a fixed offset from UTC.\n Raises ValueError if the input is well formatted but not a valid datetime.\n Returns None if the input isn't well formatted.\n "
match = DATETIME_RE.match(dt_str)
if (not match):
return None
kws: Dict[(str, Any)] = match.groupdict()
if kws['microsecond']:
kws['microsecond'] = kws['microsecond'].ljust(6, '0')
tzinfo_str = kws.pop('tzinfo')
tzinfo: Optional[dt.tzinfo] = None
if (tzinfo_str == 'Z'):
tzinfo = UTC
elif (tzinfo_str is not None):
offset_mins = (int(tzinfo_str[(- 2):]) if (len(tzinfo_str) > 3) else 0)
offset_hours = int(tzinfo_str[1:3])
offset = dt.timedelta(hours=offset_hours, minutes=offset_mins)
if (tzinfo_str[0] == '-'):
offset = (- offset)
tzinfo = dt.timezone(offset)
kws = {k: int(v) for (k, v) in kws.items() if (v is not None)}
kws['tzinfo'] = tzinfo
return dt.datetime(**kws) |
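A short usage sketch; the exact accepted formats are defined by `DATETIME_RE`, which is not shown in this excerpt, so the inputs below assume ISO-8601-style strings are matched:

import datetime as dt

# 'Z' maps to UTC and '+02:00' to a fixed-offset timezone; a non-matching
# string returns None.
print(parse_datetime('2021-06-01T12:30:00+02:00').utcoffset())  # 2:00:00
print(parse_datetime('2021-06-01T12:30:00Z').tzinfo)            # UTC
print(parse_datetime('not-a-date'))                             # None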
def parse_date(dt_str: str) -> Optional[dt.date]:
'Convert a date string to a date object.'
try:
return dt.datetime.strptime(dt_str, DATE_STR_FORMAT).date()
except ValueError:
return None | -1,140,153,710,754,188,500 | Convert a date string to a date object. | homeassistant/util/dt.py | parse_date | 854562/home-assistant | python | def parse_date(dt_str: str) -> Optional[dt.date]:
try:
return dt.datetime.strptime(dt_str, DATE_STR_FORMAT).date()
except ValueError:
return None |
def parse_time(time_str: str) -> Optional[dt.time]:
'Parse a time string (00:20:00) into Time object.\n\n Return None if invalid.\n '
parts = str(time_str).split(':')
if (len(parts) < 2):
return None
try:
hour = int(parts[0])
minute = int(parts[1])
second = (int(parts[2]) if (len(parts) > 2) else 0)
return dt.time(hour, minute, second)
except ValueError:
return None | 4,760,396,034,145,555,000 | Parse a time string (00:20:00) into Time object.
Return None if invalid. | homeassistant/util/dt.py | parse_time | 854562/home-assistant | python | def parse_time(time_str: str) -> Optional[dt.time]:
'Parse a time string (00:20:00) into Time object.\n\n Return None if invalid.\n '
parts = str(time_str).split(':')
if (len(parts) < 2):
return None
try:
hour = int(parts[0])
minute = int(parts[1])
second = (int(parts[2]) if (len(parts) > 2) else 0)
return dt.time(hour, minute, second)
except ValueError:
return None |
def get_age(date: dt.datetime) -> str:
'\n Take a datetime and return its "age" as a string.\n\n The age can be in second, minute, hour, day, month or year. Only the\n biggest unit is considered, e.g. if it\'s 2 days and 3 hours, "2 days" will\n be returned.\n Make sure date is not in the future, or else it won\'t work.\n '
def formatn(number: int, unit: str) -> str:
'Add "unit" if it\'s plural.'
if (number == 1):
return f'1 {unit}'
return f'{number:d} {unit}s'
def q_n_r(first: int, second: int) -> Tuple[(int, int)]:
'Return quotient and remaining.'
return ((first // second), (first % second))
delta = (now() - date)
day = delta.days
second = delta.seconds
(year, day) = q_n_r(day, 365)
if (year > 0):
return formatn(year, 'year')
(month, day) = q_n_r(day, 30)
if (month > 0):
return formatn(month, 'month')
if (day > 0):
return formatn(day, 'day')
(hour, second) = q_n_r(second, 3600)
if (hour > 0):
return formatn(hour, 'hour')
(minute, second) = q_n_r(second, 60)
if (minute > 0):
return formatn(minute, 'minute')
return formatn(second, 'second') | -8,345,418,009,683,860,000 | Take a datetime and return its "age" as a string.
The age can be in second, minute, hour, day, month or year. Only the
biggest unit is considered, e.g. if it's 2 days and 3 hours, "2 days" will
be returned.
Make sure date is not in the future, or else it won't work. | homeassistant/util/dt.py | get_age | 854562/home-assistant | python | def get_age(date: dt.datetime) -> str:
'\n Take a datetime and return its "age" as a string.\n\n The age can be in second, minute, hour, day, month or year. Only the\n biggest unit is considered, e.g. if it\'s 2 days and 3 hours, "2 days" will\n be returned.\n Make sure date is not in the future, or else it won\'t work.\n '
def formatn(number: int, unit: str) -> str:
'Add "unit" if it\'s plural.'
if (number == 1):
return f'1 {unit}'
return f'{number:d} {unit}s'
def q_n_r(first: int, second: int) -> Tuple[(int, int)]:
'Return quotient and remaining.'
return ((first // second), (first % second))
delta = (now() - date)
day = delta.days
second = delta.seconds
(year, day) = q_n_r(day, 365)
if (year > 0):
return formatn(year, 'year')
(month, day) = q_n_r(day, 30)
if (month > 0):
return formatn(month, 'month')
if (day > 0):
return formatn(day, 'day')
(hour, second) = q_n_r(second, 3600)
if (hour > 0):
return formatn(hour, 'hour')
(minute, second) = q_n_r(second, 60)
if (minute > 0):
return formatn(minute, 'minute')
return formatn(second, 'second') |
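Since only the largest unit is reported, remainders are dropped (40 days reads as "1 month", not "1 month, 10 days"). A quick sketch, assuming the module-level `now()` defined above:

import datetime as dt

print(get_age(now() - dt.timedelta(seconds=30)))  # '30 seconds'
print(get_age(now() - dt.timedelta(hours=5)))     # '5 hours'
print(get_age(now() - dt.timedelta(days=40)))     # '1 month'
print(get_age(now() - dt.timedelta(days=400)))    # '1 year'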
def parse_time_expression(parameter: Any, min_value: int, max_value: int) -> List[int]:
'Parse the time expression part and return a list of times to match.'
if ((parameter is None) or (parameter == MATCH_ALL)):
res = list(range(min_value, (max_value + 1)))
elif (isinstance(parameter, str) and parameter.startswith('/')):
parameter = int(parameter[1:])
res = [x for x in range(min_value, (max_value + 1)) if ((x % parameter) == 0)]
elif (not hasattr(parameter, '__iter__')):
res = [int(parameter)]
else:
res = list(sorted((int(x) for x in parameter)))
for val in res:
if ((val < min_value) or (val > max_value)):
raise ValueError("Time expression '{}': parameter {} out of range ({} to {})".format(parameter, val, min_value, max_value))
return res | 8,850,174,465,410,132,000 | Parse the time expression part and return a list of times to match. | homeassistant/util/dt.py | parse_time_expression | 854562/home-assistant | python | def parse_time_expression(parameter: Any, min_value: int, max_value: int) -> List[int]:
if ((parameter is None) or (parameter == MATCH_ALL)):
res = list(range(min_value, (max_value + 1)))
elif (isinstance(parameter, str) and parameter.startswith('/')):
parameter = int(parameter[1:])
res = [x for x in range(min_value, (max_value + 1)) if ((x % parameter) == 0)]
elif (not hasattr(parameter, '__iter__')):
res = [int(parameter)]
else:
res = list(sorted((int(x) for x in parameter)))
for val in res:
if ((val < min_value) or (val > max_value)):
raise ValueError("Time expression '{}': parameter {} out of range ({} to {})".format(parameter, val, min_value, max_value))
return res |
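The three accepted forms (wildcard via None or MATCH_ALL, a '/n' step string, and explicit values) can be seen directly, here for a seconds field bounded by 0 and 59:

print(parse_time_expression(None, 0, 59))     # [0, 1, ..., 59]: matches everything
print(parse_time_expression('/15', 0, 59))    # [0, 15, 30, 45]: every 15th value
print(parse_time_expression([30, 5], 0, 59))  # [5, 30]: explicit values, sorted
print(parse_time_expression(7, 0, 59))        # [7]: single value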
def find_next_time_expression_time(now: dt.datetime, seconds: List[int], minutes: List[int], hours: List[int]) -> dt.datetime:
'Find the next datetime from now for which the time expression matches.\n\n The algorithm looks at each time unit separately and tries to find the\n next one that matches for each. If any of them would roll over, all\n time units below that are reset to the first matching value.\n\n Timezones are also handled (the tzinfo of the now object is used),\n including daylight saving time.\n '
if ((not seconds) or (not minutes) or (not hours)):
raise ValueError('Cannot find a next time: Time expression never matches!')
def _lower_bound(arr: List[int], cmp: int) -> Optional[int]:
'Return the first value in arr greater or equal to cmp.\n\n Return None if no such value exists.\n '
left = 0
right = len(arr)
while (left < right):
mid = ((left + right) // 2)
if (arr[mid] < cmp):
left = (mid + 1)
else:
right = mid
if (left == len(arr)):
return None
return arr[left]
result = now.replace(microsecond=0)
next_second = _lower_bound(seconds, result.second)
if (next_second is None):
next_second = seconds[0]
result += dt.timedelta(minutes=1)
result = result.replace(second=next_second)
next_minute = _lower_bound(minutes, result.minute)
if (next_minute != result.minute):
result = result.replace(second=seconds[0])
if (next_minute is None):
next_minute = minutes[0]
result += dt.timedelta(hours=1)
result = result.replace(minute=next_minute)
next_hour = _lower_bound(hours, result.hour)
if (next_hour != result.hour):
result = result.replace(second=seconds[0], minute=minutes[0])
if (next_hour is None):
next_hour = hours[0]
result += dt.timedelta(days=1)
result = result.replace(hour=next_hour)
if (result.tzinfo is None):
return result
tzinfo: pytzinfo.DstTzInfo = result.tzinfo
result = result.replace(tzinfo=None)
try:
result = tzinfo.localize(result, is_dst=None)
except pytzexceptions.AmbiguousTimeError:
use_dst = bool(now.dst())
result = tzinfo.localize(result, is_dst=use_dst)
except pytzexceptions.NonExistentTimeError:
result = (result.replace(tzinfo=tzinfo) + dt.timedelta(seconds=1))
return find_next_time_expression_time(result, seconds, minutes, hours)
result_dst = cast(dt.timedelta, result.dst())
now_dst = cast(dt.timedelta, now.dst())
if (result_dst >= now_dst):
return result
try:
tzinfo.localize(now.replace(tzinfo=None), is_dst=None)
return result
except pytzexceptions.AmbiguousTimeError:
pass
check = (now - now_dst)
check_result = find_next_time_expression_time(check, seconds, minutes, hours)
try:
tzinfo.localize(check_result.replace(tzinfo=None), is_dst=None)
return result
except pytzexceptions.AmbiguousTimeError:
pass
check_result = tzinfo.localize(check_result.replace(tzinfo=None), is_dst=False)
return check_result | -6,388,431,229,437,913,000 | Find the next datetime from now for which the time expression matches.
The algorithm looks at each time unit separately and tries to find the
next one that matches for each. If any of them would roll over, all
time units below that are reset to the first matching value.
Timezones are also handled (the tzinfo of the now object is used),
including daylight saving time. | homeassistant/util/dt.py | find_next_time_expression_time | 854562/home-assistant | python | def find_next_time_expression_time(now: dt.datetime, seconds: List[int], minutes: List[int], hours: List[int]) -> dt.datetime:
'Find the next datetime from now for which the time expression matches.\n\n The algorithm looks at each time unit separately and tries to find the\n next one that matches for each. If any of them would roll over, all\n time units below that are reset to the first matching value.\n\n Timezones are also handled (the tzinfo of the now object is used),\n including daylight saving time.\n '
if ((not seconds) or (not minutes) or (not hours)):
raise ValueError('Cannot find a next time: Time expression never matches!')
def _lower_bound(arr: List[int], cmp: int) -> Optional[int]:
'Return the first value in arr greater or equal to cmp.\n\n Return None if no such value exists.\n '
left = 0
right = len(arr)
while (left < right):
mid = ((left + right) // 2)
if (arr[mid] < cmp):
left = (mid + 1)
else:
right = mid
if (left == len(arr)):
return None
return arr[left]
result = now.replace(microsecond=0)
next_second = _lower_bound(seconds, result.second)
if (next_second is None):
next_second = seconds[0]
result += dt.timedelta(minutes=1)
result = result.replace(second=next_second)
next_minute = _lower_bound(minutes, result.minute)
if (next_minute != result.minute):
result = result.replace(second=seconds[0])
if (next_minute is None):
next_minute = minutes[0]
result += dt.timedelta(hours=1)
result = result.replace(minute=next_minute)
next_hour = _lower_bound(hours, result.hour)
if (next_hour != result.hour):
result = result.replace(second=seconds[0], minute=minutes[0])
if (next_hour is None):
next_hour = hours[0]
result += dt.timedelta(days=1)
result = result.replace(hour=next_hour)
if (result.tzinfo is None):
return result
tzinfo: pytzinfo.DstTzInfo = result.tzinfo
result = result.replace(tzinfo=None)
try:
result = tzinfo.localize(result, is_dst=None)
except pytzexceptions.AmbiguousTimeError:
use_dst = bool(now.dst())
result = tzinfo.localize(result, is_dst=use_dst)
except pytzexceptions.NonExistentTimeError:
result = (result.replace(tzinfo=tzinfo) + dt.timedelta(seconds=1))
return find_next_time_expression_time(result, seconds, minutes, hours)
result_dst = cast(dt.timedelta, result.dst())
now_dst = cast(dt.timedelta, now.dst())
if (result_dst >= now_dst):
return result
try:
tzinfo.localize(now.replace(tzinfo=None), is_dst=None)
return result
except pytzexceptions.AmbiguousTimeError:
pass
check = (now - now_dst)
check_result = find_next_time_expression_time(check, seconds, minutes, hours)
try:
tzinfo.localize(check_result.replace(tzinfo=None), is_dst=None)
return result
except pytzexceptions.AmbiguousTimeError:
pass
check_result = tzinfo.localize(check_result.replace(tzinfo=None), is_dst=False)
return check_result |
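Paired with `parse_time_expression`, this is essentially a cron-style "find the next trigger" search. A sketch with a naive (timezone-free) start point, which keeps the DST branches out of play:

import datetime as dt

seconds = parse_time_expression(0, 0, 59)      # [0]
minutes = parse_time_expression('/15', 0, 59)  # [0, 15, 30, 45]
hours = parse_time_expression(None, 0, 23)     # every hour

start = dt.datetime(2021, 6, 1, 10, 7, 12)     # naive datetime, no tzinfo
print(find_next_time_expression_time(start, seconds, minutes, hours))
# 2021-06-01 10:15:00, the next quarter-hour boundary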
def formatn(number: int, unit: str) -> str:
'Add "unit" if it\'s plural.'
if (number == 1):
return f'1 {unit}'
return f'{number:d} {unit}s' | 6,630,770,749,241,600,000 | Add "unit" if it's plural. | homeassistant/util/dt.py | formatn | 854562/home-assistant | python | def formatn(number: int, unit: str) -> str:
'Add "unit" if it\'s plural.'
if (number == 1):
return f'1 {unit}'
return f'{number:d} {unit}s' |
def q_n_r(first: int, second: int) -> Tuple[(int, int)]:
'Return quotient and remaining.'
return ((first // second), (first % second)) | -3,372,020,599,350,087,700 | Return quotient and remaining. | homeassistant/util/dt.py | q_n_r | 854562/home-assistant | python | def q_n_r(first: int, second: int) -> Tuple[(int, int)]:
return ((first // second), (first % second)) |
def _lower_bound(arr: List[int], cmp: int) -> Optional[int]:
'Return the first value in arr greater or equal to cmp.\n\n Return None if no such value exists.\n '
left = 0
right = len(arr)
while (left < right):
mid = ((left + right) // 2)
if (arr[mid] < cmp):
left = (mid + 1)
else:
right = mid
if (left == len(arr)):
return None
return arr[left] | -4,479,979,004,816,162,300 | Return the first value in arr greater or equal to cmp.
Return None if no such value exists. | homeassistant/util/dt.py | _lower_bound | 854562/home-assistant | python | def _lower_bound(arr: List[int], cmp: int) -> Optional[int]:
'Return the first value in arr greater or equal to cmp.\n\n Return None if no such value exists.\n '
left = 0
right = len(arr)
while (left < right):
mid = ((left + right) // 2)
if (arr[mid] < cmp):
left = (mid + 1)
else:
right = mid
if (left == len(arr)):
return None
return arr[left] |
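`_lower_bound` is a plain binary search for the first element greater than or equal to the probe; the same behaviour is available from the standard library's `bisect`, which makes for an easy cross-check:

import bisect

def lower_bound(arr, cmp):
    # First value in arr >= cmp, else None (mirrors the helper above).
    i = bisect.bisect_left(arr, cmp)
    return arr[i] if i < len(arr) else None

print(lower_bound([0, 15, 30, 45], 20))  # 30
print(lower_bound([0, 15, 30, 45], 15))  # 15
print(lower_bound([0, 15, 30, 45], 50))  # None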
def __init__(self, location: str=dataset_dir('MSLR10K'), split: str='train', fold: int=1, normalize: bool=True, filter_queries: Optional[bool]=None, download: bool=True, validate_checksums: bool=True):
'\n Args:\n location: Directory where the dataset is located.\n split: The data split to load ("train", "test" or "vali")\n fold: Which data fold to load (1...5)\n normalize: Whether to perform query-level feature\n normalization.\n filter_queries: Whether to filter out queries that\n have no relevant items. If not given this will filter queries\n for the test set but not the train set.\n download: Whether to download the dataset if it does not\n exist.\n validate_checksums: Whether to validate the dataset files\n via sha256.\n '
if (split not in MSLR10K.splits.keys()):
raise ValueError(("unrecognized data split '%s'" % str(split)))
if (fold not in MSLR10K.per_fold_expected_files.keys()):
raise ValueError(("unrecognized data fold '%s'" % str(fold)))
validate_and_download(location=location, expected_files=MSLR10K.per_fold_expected_files[fold], downloader=(MSLR10K.downloader if download else None), validate_checksums=validate_checksums)
if (filter_queries is None):
filter_queries = (False if (split == 'train') else True)
datafile = os.path.join(location, ('Fold%d' % fold), MSLR10K.splits[split])
super().__init__(file=datafile, sparse=False, normalize=normalize, filter_queries=filter_queries, zero_based='auto') | 7,654,225,927,789,626,000 | Args:
location: Directory where the dataset is located.
split: The data split to load ("train", "test" or "vali")
fold: Which data fold to load (1...5)
normalize: Whether to perform query-level feature
normalization.
filter_queries: Whether to filter out queries that
have no relevant items. If not given this will filter queries
for the test set but not the train set.
download: Whether to download the dataset if it does not
exist.
validate_checksums: Whether to validate the dataset files
via sha256. | pytorchltr/datasets/svmrank/mslr10k.py | __init__ | SuperXiang/pytorchltr | python | def __init__(self, location: str=dataset_dir('MSLR10K'), split: str='train', fold: int=1, normalize: bool=True, filter_queries: Optional[bool]=None, download: bool=True, validate_checksums: bool=True):
'\n Args:\n location: Directory where the dataset is located.\n split: The data split to load ("train", "test" or "vali")\n fold: Which data fold to load (1...5)\n normalize: Whether to perform query-level feature\n normalization.\n filter_queries: Whether to filter out queries that\n have no relevant items. If not given this will filter queries\n for the test set but not the train set.\n download: Whether to download the dataset if it does not\n exist.\n validate_checksums: Whether to validate the dataset files\n via sha256.\n '
if (split not in MSLR10K.splits.keys()):
raise ValueError(("unrecognized data split '%s'" % str(split)))
if (fold not in MSLR10K.per_fold_expected_files.keys()):
raise ValueError(("unrecognized data fold '%s'" % str(fold)))
validate_and_download(location=location, expected_files=MSLR10K.per_fold_expected_files[fold], downloader=(MSLR10K.downloader if download else None), validate_checksums=validate_checksums)
if (filter_queries is None):
filter_queries = (False if (split == 'train') else True)
datafile = os.path.join(location, ('Fold%d' % fold), MSLR10K.splits[split])
super().__init__(file=datafile, sparse=False, normalize=normalize, filter_queries=filter_queries, zero_based='auto') |
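A hedged usage sketch; the import path is inferred from the `pytorchltr/datasets/svmrank/mslr10k.py` path above and may differ between library versions:

from pytorchltr.datasets import MSLR10K  # assumed import path

# Downloads and checksum-verifies the fold on first use (download=True by default).
train = MSLR10K(split='train', fold=1)   # query-level feature normalization on by default
test = MSLR10K(split='test', fold=1)     # filter_queries defaults to True for non-train splits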
def tokenizeInput(self, token):
"\n Cleans and tokenizes the user's input.\n\n empty characters and spaces are trimmed to prevent\n matching all paths in the index.\n "
return list(filter(None, re.split(self.options.input_tokenizer, self.clean(token)))) | 5,647,962,165,047,563,000 | Cleans and tokenizes the user's input.
empty characters and spaces are trimmed to prevent
matching all paths in the index. | gooey/gui/components/filtering/prefix_filter.py | tokenizeInput | QuantumSpatialInc/Gooey | python | def tokenizeInput(self, token):
"\n Cleans and tokenizes the user's input.\n\n empty characters and spaces are trimmed to prevent\n matching all paths in the index.\n "
return list(filter(None, re.split(self.options.input_tokenizer, self.clean(token)))) |
def tokenizeChoice(self, choice):
"\n Splits the `choice` into a series of tokens based on\n the user's criteria.\n\n If suffix indexing is enabled, the individual tokens\n are further broken down and indexed by their suffix offsets. e.g.\n\n 'Banana', 'anana', 'nana', 'ana'\n "
choice_ = self.clean(choice)
tokens = re.split(self.options.choice_tokenizer, choice_)
if self.options.index_suffix:
return [token[i:] for token in tokens for i in range((len(token) - 2))]
else:
return tokens | 7,454,731,504,844,039,000 | Splits the `choice` into a series of tokens based on
the user's criteria.
If suffix indexing is enabled, the individual tokens
are further broken down and indexed by their suffix offsets. e.g.
'Banana', 'anana', 'nana', 'ana' | gooey/gui/components/filtering/prefix_filter.py | tokenizeChoice | QuantumSpatialInc/Gooey | python | def tokenizeChoice(self, choice):
"\n Splits the `choice` into a series of tokens based on\n the user's criteria.\n\n If suffix indexing is enabled, the individual tokens\n are further broken down and indexed by their suffix offsets. e.g.\n\n 'Banana', 'anana', 'nana', 'ana'\n "
choice_ = self.clean(choice)
tokens = re.split(self.options.choice_tokenizer, choice_)
if self.options.index_suffix:
return [token[i:] for token in tokens for i in range((len(token) - 2))]
else:
return tokens |
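The suffix-indexing branch is just a comprehension over offsets; in isolation, for the token used in the docstring example:

token = 'Banana'
suffixes = [token[i:] for i in range(len(token) - 2)]
print(suffixes)  # ['Banana', 'anana', 'nana', 'ana']

Indexing these suffixes is what lets a later prefix search for, say, 'nan' still match 'Banana'.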
def decov(h, reduce='half_squared_sum'):
"Computes the DeCov loss of ``h``\n\n The output is a variable whose value depends on the value of\n the option ``reduce``. If it is ``'no'``, it holds a matrix\n whose size is same as the number of columns of ``y``.\n If it is ``'half_squared_sum'``, it holds the half of the\n squared Frobenius norm (i.e. squared of the L2 norm of a matrix flattened\n to a vector) of the matrix.\n\n Args:\n h (:class:`~chainer.Variable` or :ref:`ndarray`):\n Variable holding a matrix where the first dimension\n corresponds to the batches.\n recude (str): Reduction option. Its value must be either\n ``'half_squared_sum'`` or ``'no'``.\n Otherwise, :class:`ValueError` is raised.\n\n Returns:\n ~chainer.Variable:\n A variable holding a scalar of the DeCov loss.\n If ``reduce`` is ``'no'``, the output variable holds\n 2-dimensional array matrix of shape ``(N, N)`` where\n ``N`` is the number of columns of ``y``.\n If it is ``'half_squared_sum'``, the output variable\n holds a scalar value.\n\n .. note::\n\n See https://arxiv.org/abs/1511.06068 for details.\n\n "
return DeCov(reduce)(h) | 6,244,738,837,472,731,000 | Computes the DeCov loss of ``h``
The output is a variable whose value depends on the value of
the option ``reduce``. If it is ``'no'``, it holds a matrix
whose size is same as the number of columns of ``y``.
If it is ``'half_squared_sum'``, it holds the half of the
squared Frobenius norm (i.e. squared of the L2 norm of a matrix flattened
to a vector) of the matrix.
Args:
h (:class:`~chainer.Variable` or :ref:`ndarray`):
Variable holding a matrix where the first dimension
corresponds to the batches.
reduce (str): Reduction option. Its value must be either
``'half_squared_sum'`` or ``'no'``.
Otherwise, :class:`ValueError` is raised.
Returns:
~chainer.Variable:
A variable holding a scalar of the DeCov loss.
If ``reduce`` is ``'no'``, the output variable holds
2-dimensional array matrix of shape ``(N, N)`` where
``N`` is the number of columns of ``y``.
If it is ``'half_squared_sum'``, the output variable
holds a scalar value.
.. note::
See https://arxiv.org/abs/1511.06068 for details. | chainer/functions/loss/decov.py | decov | Anyz01/chainer | python | def decov(h, reduce='half_squared_sum'):
"Computes the DeCov loss of ``h``\n\n The output is a variable whose value depends on the value of\n the option ``reduce``. If it is ``'no'``, it holds a matrix\n whose size is same as the number of columns of ``y``.\n If it is ``'half_squared_sum'``, it holds the half of the\n squared Frobenius norm (i.e. squared of the L2 norm of a matrix flattened\n to a vector) of the matrix.\n\n Args:\n h (:class:`~chainer.Variable` or :ref:`ndarray`):\n Variable holding a matrix where the first dimension\n corresponds to the batches.\n recude (str): Reduction option. Its value must be either\n ``'half_squared_sum'`` or ``'no'``.\n Otherwise, :class:`ValueError` is raised.\n\n Returns:\n ~chainer.Variable:\n A variable holding a scalar of the DeCov loss.\n If ``reduce`` is ``'no'``, the output variable holds\n 2-dimensional array matrix of shape ``(N, N)`` where\n ``N`` is the number of columns of ``y``.\n If it is ``'half_squared_sum'``, the output variable\n holds a scalar value.\n\n .. note::\n\n See https://arxiv.org/abs/1511.06068 for details.\n\n "
return DeCov(reduce)(h) |
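The quantity itself, as described in the DeCov paper, is half the squared Frobenius norm of the off-diagonal part of the batch covariance of `h`. A NumPy sketch of that definition (independent of Chainer's exact implementation, which may differ in minor details):

import numpy as np

def decov_loss(h):
    # h: (batch, features) activations
    h_centered = h - h.mean(axis=0, keepdims=True)
    c = h_centered.T.dot(h_centered) / h.shape[0]  # batch covariance, (features, features)
    off_diag = c - np.diag(np.diag(c))             # drop the diagonal (per-feature variances)
    return 0.5 * np.sum(off_diag ** 2)             # half squared Frobenius norm

h = np.random.randn(8, 4).astype(np.float32)
print(decov_loss(h))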
def _findFirstTraceInsideTensorFlowPyLibrary(self, op):
'Find the first trace of an op that belongs to the TF Python library.'
for trace in op.traceback:
if source_utils.guess_is_tensorflow_py_library(trace.filename):
return trace | -7,241,050,063,364,547,000 | Find the first trace of an op that belongs to the TF Python library. | tensorflow/python/debug/lib/source_remote_test.py | _findFirstTraceInsideTensorFlowPyLibrary | 05259/tensorflow | python | def _findFirstTraceInsideTensorFlowPyLibrary(self, op):
for trace in op.traceback:
if source_utils.guess_is_tensorflow_py_library(trace.filename):
return trace |
def testGRPCServerMessageSizeLimit(self):
'Assert gRPC debug server is started with unlimited message size.'
with test.mock.patch.object(grpc, 'server', wraps=grpc.server) as mock_grpc_server:
(_, _, _, server_thread, server) = grpc_debug_test_server.start_server_on_separate_thread(poll_server=True)
mock_grpc_server.assert_called_with(test.mock.ANY, options=[('grpc.max_receive_message_length', (- 1)), ('grpc.max_send_message_length', (- 1))])
server.stop_server().wait()
server_thread.join() | -3,176,832,388,540,558,000 | Assert gRPC debug server is started with unlimited message size. | tensorflow/python/debug/lib/source_remote_test.py | testGRPCServerMessageSizeLimit | 05259/tensorflow | python | def testGRPCServerMessageSizeLimit(self):
with test.mock.patch.object(grpc, 'server', wraps=grpc.server) as mock_grpc_server:
(_, _, _, server_thread, server) = grpc_debug_test_server.start_server_on_separate_thread(poll_server=True)
mock_grpc_server.assert_called_with(test.mock.ANY, options=[('grpc.max_receive_message_length', (- 1)), ('grpc.max_send_message_length', (- 1))])
server.stop_server().wait()
server_thread.join() |
def list_combinations_generator(modalities: list):
'Generates combinations for items in the given list.\n\n Args:\n modalities: List of modalities available in the dataset.\n\n Returns:\n Combinations of items in the given list.\n '
modality_combinations = list()
for length in range(1, (len(modalities) + 1)):
current_length_combinations = itertools.combinations(modalities, length)
for combination in current_length_combinations:
current_combination_list = list()
for k in combination:
current_combination_list.append(k)
modality_combinations.append(current_combination_list)
return modality_combinations | 4,698,962,698,077,386,000 | Generates combinations for items in the given list.
Args:
modalities: List of modalities available in the dataset.
Returns:
Combinations of items in the given list. | codes/model_training_testing.py | list_combinations_generator | preetham-ganesh/multi-sensor-human-activity-recognition | python | def list_combinations_generator(modalities: list):
'Generates combinations for items in the given list.\n\n Args:\n modalities: List of modalities available in the dataset.\n\n Returns:\n Combinations of items in the given list.\n '
modality_combinations = list()
for length in range(1, (len(modalities) + 1)):
current_length_combinations = itertools.combinations(modalities, length)
for combination in current_length_combinations:
current_combination_list = list()
for k in combination:
current_combination_list.append(k)
modality_combinations.append(current_combination_list)
return modality_combinations |
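For three modalities this yields the 7 non-empty subsets (2^3 - 1), in increasing length. A quick check with hypothetical modality names:

combos = list_combinations_generator(['rgb', 'depth', 'inertial'])
print(len(combos))            # 7
print(combos[0], combos[-1])  # ['rgb'] ['rgb', 'depth', 'inertial']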
def data_combiner(n_actions: int, subject_ids: list, n_takes: int, modalities: list, skeleton_pose_model: str):
'Combines skeleton point information for all actions, all takes, given list of subject ids and given list of\n modalities.\n\n Args:\n n_actions: Total number of actions in the original dataset.\n subject_ids: List of subjects in the current set.\n n_takes: Total number of takes in the original dataset.\n modalities: Current combination of modalities.\n skeleton_pose_model: Current skeleton pose model name which will be used to import skeleton point\n information.\n\n Returns:\n A pandas dataframe which contains combined skeleton point information for all actions, all takes, given list\n of subject ids and given list of modalities.\n '
combined_modality_skeleton_information = pd.DataFrame()
for i in range(1, (n_actions + 1)):
for j in range(len(subject_ids)):
for k in range(1, (n_takes + 1)):
data_name = 'a{}_s{}_t{}'.format(i, subject_ids[j], k)
try:
current_data_name_modality_information = pd.read_csv('../data/normalized_data/{}/{}_{}.csv'.format(modalities[0], data_name, skeleton_pose_model))
except FileNotFoundError:
continue
if (len(modalities) != 1):
for m in range(1, len(modalities)):
current_skeleton_point_information = pd.read_csv('../data/normalized_data/{}/{}_{}.csv'.format(modalities[m], data_name, skeleton_pose_model))
current_data_name_modality_information = pd.merge(current_data_name_modality_information, current_skeleton_point_information, on='frame', how='outer')
current_data_name_modality_information['data_name'] = [data_name for _ in range(len(current_data_name_modality_information))]
current_data_name_modality_information = current_data_name_modality_information.drop(columns=['frame'])
current_data_name_modality_information['action'] = [i for _ in range(len(current_data_name_modality_information))]
combined_modality_skeleton_information = combined_modality_skeleton_information.append(current_data_name_modality_information)
return combined_modality_skeleton_information | -6,621,214,251,104,755,000 | Combines skeleton point information for all actions, all takes, given list of subject ids and given list of
modalities.
Args:
n_actions: Total number of actions in the original dataset.
subject_ids: List of subjects in the current set.
n_takes: Total number of takes in the original dataset.
modalities: Current combination of modalities.
skeleton_pose_model: Current skeleton pose model name which will be used to import skeleton point
information.
Returns:
A pandas dataframe which contains combined skeleton point information for all actions, all takes, given list
of subject ids and given list of modalities. | codes/model_training_testing.py | data_combiner | preetham-ganesh/multi-sensor-human-activity-recognition | python | def data_combiner(n_actions: int, subject_ids: list, n_takes: int, modalities: list, skeleton_pose_model: str):
'Combines skeleton point information for all actions, all takes, given list of subject ids and given list of\n modalities.\n\n Args:\n n_actions: Total number of actions in the original dataset.\n subject_ids: List of subjects in the current set.\n n_takes: Total number of takes in the original dataset.\n modalities: Current combination of modalities.\n skeleton_pose_model: Current skeleton pose model name which will be used to import skeleton point\n information.\n\n Returns:\n A pandas dataframe which contains combined skeleton point information for all actions, all takes, given list\n of subject ids and given list of modalities.\n '
combined_modality_skeleton_information = pd.DataFrame()
for i in range(1, (n_actions + 1)):
for j in range(len(subject_ids)):
for k in range(1, (n_takes + 1)):
data_name = 'a{}_s{}_t{}'.format(i, subject_ids[j], k)
try:
current_data_name_modality_information = pd.read_csv('../data/normalized_data/{}/{}_{}.csv'.format(modalities[0], data_name, skeleton_pose_model))
except FileNotFoundError:
continue
if (len(modalities) != 1):
for m in range(1, len(modalities)):
current_skeleton_point_information = pd.read_csv('../data/normalized_data/{}/{}_{}.csv'.format(modalities[m], data_name, skeleton_pose_model))
current_data_name_modality_information = pd.merge(current_data_name_modality_information, current_skeleton_point_information, on='frame', how='outer')
current_data_name_modality_information['data_name'] = [data_name for _ in range(len(current_data_name_modality_information))]
current_data_name_modality_information = current_data_name_modality_information.drop(columns=['frame'])
current_data_name_modality_information['action'] = [i for _ in range(len(current_data_name_modality_information))]
combined_modality_skeleton_information = combined_modality_skeleton_information.append(current_data_name_modality_information)
return combined_modality_skeleton_information |
def calculate_metrics(actual_values: np.ndarray, predicted_values: np.ndarray):
'Using actual_values, predicted_values calculates metrics such as accuracy, balanced accuracy, precision, recall,\n and f1 scores.\n\n Args:\n actual_values: Actual action labels in the dataset\n predicted_values: Action labels predicted by the currently trained model\n\n Returns:\n Dictionary contains keys as score names and values as scores which are floating point values.\n '
return {'accuracy_score': round((accuracy_score(actual_values, predicted_values) * 100), 3), 'balanced_accuracy_score': round((balanced_accuracy_score(actual_values, predicted_values) * 100), 3), 'precision_score': round((precision_score(actual_values, predicted_values, average='weighted', labels=np.unique(predicted_values)) * 100), 3), 'recall_score': round((recall_score(actual_values, predicted_values, average='weighted', labels=np.unique(predicted_values)) * 100), 3), 'f1_score': round((f1_score(actual_values, predicted_values, average='weighted', labels=np.unique(predicted_values)) * 100), 3)} | -4,365,684,577,823,481,300 | Using actual_values, predicted_values calculates metrics such as accuracy, balanced accuracy, precision, recall,
and f1 scores.
Args:
actual_values: Actual action labels in the dataset
predicted_values: Action labels predicted by the currently trained model
Returns:
Dictionary contains keys as score names and values as scores which are floating point values. | codes/model_training_testing.py | calculate_metrics | preetham-ganesh/multi-sensor-human-activity-recognition | python | def calculate_metrics(actual_values: np.ndarray, predicted_values: np.ndarray):
'Using actual_values, predicted_values calculates metrics such as accuracy, balanced accuracy, precision, recall,\n and f1 scores.\n\n Args:\n actual_values: Actual action labels in the dataset\n predicted_values: Action labels predicted by the currently trained model\n\n Returns:\n Dictionary contains keys as score names and values as scores which are floating point values.\n '
return {'accuracy_score': round((accuracy_score(actual_values, predicted_values) * 100), 3), 'balanced_accuracy_score': round((balanced_accuracy_score(actual_values, predicted_values) * 100), 3), 'precision_score': round((precision_score(actual_values, predicted_values, average='weighted', labels=np.unique(predicted_values)) * 100), 3), 'recall_score': round((recall_score(actual_values, predicted_values, average='weighted', labels=np.unique(predicted_values)) * 100), 3), 'f1_score': round((f1_score(actual_values, predicted_values, average='weighted', labels=np.unique(predicted_values)) * 100), 3)} |
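A quick sketch with made-up labels, assuming the function above is in scope; every score comes back as a percentage rounded to three decimals:

import numpy as np

actual = np.array([1, 1, 2, 2, 3, 3])
predicted = np.array([1, 1, 2, 3, 3, 3])
scores = calculate_metrics(actual, predicted)
print(scores['accuracy_score'])  # 83.333 (5 of 6 frames correct)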
def retrieve_hyperparameters(current_model_name: str):
'Based on the current_model_name returns a list of hyperparameters used for optimizing the model (if necessary).\n\n Args:\n current_model_name: Name of the model currently expected to be trained\n\n Returns:\n A dictionary containing the hyperparameter name and the values that will be used to optimize the model\n '
if (current_model_name == 'support_vector_classifier'):
parameters = {'kernel': ['linear', 'poly', 'rbf']}
elif (current_model_name == 'decision_tree_classifier'):
parameters = {'criterion': ['gini', 'entropy'], 'splitter': ['best', 'random'], 'max_depth': [2, 3, 4, 5, 6, 7]}
elif ((current_model_name == 'random_forest_classifier') or (current_model_name == 'extra_trees_classifier')):
parameters = {'n_estimators': [(i * 10) for i in range(2, 11, 2)], 'criterion': ['gini', 'entropy'], 'max_depth': [2, 3, 4, 5, 6, 7]}
elif (current_model_name == 'gradient_boosting_classifier'):
parameters = {'max_depth': [2, 3, 4, 5, 6, 7], 'n_estimators': [(i * 10) for i in range(2, 11, 2)]}
else:
parameters = {'None': ['None']}
return parameters | 2,844,473,804,344,919,600 | Based on the current_model_name returns a list of hyperparameters used for optimizing the model (if necessary).
Args:
current_model_name: Name of the model currently expected to be trained
Returns:
A dictionary containing the hyperparameter name and the values that will be used to optimize the model | codes/model_training_testing.py | retrieve_hyperparameters | preetham-ganesh/multi-sensor-human-activity-recognition | python | def retrieve_hyperparameters(current_model_name: str):
'Based on the current_model_name returns a list of hyperparameters used for optimizing the model (if necessary).\n\n Args:\n current_model_name: Name of the model currently expected to be trained\n\n Returns:\n A dictionary containing the hyperparameter name and the values that will be used to optimize the model\n '
if (current_model_name == 'support_vector_classifier'):
parameters = {'kernel': ['linear', 'poly', 'rbf']}
elif (current_model_name == 'decision_tree_classifier'):
parameters = {'criterion': ['gini', 'entropy'], 'splitter': ['best', 'random'], 'max_depth': [2, 3, 4, 5, 6, 7]}
elif ((current_model_name == 'random_forest_classifier') or (current_model_name == 'extra_trees_classifier')):
parameters = {'n_estimators': [(i * 10) for i in range(2, 11, 2)], 'criterion': ['gini', 'entropy'], 'max_depth': [2, 3, 4, 5, 6, 7]}
elif (current_model_name == 'gradient_boosting_classifier'):
parameters = {'max_depth': [2, 3, 4, 5, 6, 7], 'n_estimators': [(i * 10) for i in range(2, 11, 2)]}
else:
parameters = {'None': ['None']}
return parameters |
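A short sketch of how the returned dictionary is typically expanded into individual settings with scikit-learn's ParameterGrid; the model name is just one of the strings handled above.

from sklearn.model_selection import ParameterGrid

parameters = retrieve_hyperparameters('decision_tree_classifier')
for combination in ParameterGrid(parameters):
    # e.g. {'criterion': 'gini', 'max_depth': 2, 'splitter': 'best'}
    print(combination)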
def split_data_input_target(skeleton_data: pd.DataFrame):
'Splits skeleton_data into input and target datasets by filtering / selecting certain columns.\n\n Args:\n skeleton_data: Train / Validation / Test dataset used to split / filter certain columns.\n\n Returns:\n A tuple containing 2 numpy ndarrays for the input and target datasets.\n '
skeleton_data_input = skeleton_data.drop(columns=['data_name', 'action'])
skeleton_data_target = skeleton_data['action']
return (np.array(skeleton_data_input), np.array(skeleton_data_target)) | -172,952,320,064,424,670 | Splits skeleton_data into input and target datasets by filtering / selecting certain columns.
Args:
skeleton_data: Train / Validation / Test dataset used to split / filter certain columns.
Returns:
A tuple containing 2 numpy ndarrays for the input and target datasets. | codes/model_training_testing.py | split_data_input_target | preetham-ganesh/multi-sensor-human-activity-recognition | python | def split_data_input_target(skeleton_data: pd.DataFrame):
'Splits skeleton_data into input and target datasets by filtering / selecting certain columns.\n\n Args:\n skeleton_data: Train / Validation / Test dataset used to split / filter certain columns.\n\n Returns:\n A tuple containing 2 numpy ndarrays for the input and target datasets.\n '
skeleton_data_input = skeleton_data.drop(columns=['data_name', 'action'])
skeleton_data_target = skeleton_data['action']
return (np.array(skeleton_data_input), np.array(skeleton_data_target)) |
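A toy example of split_data_input_target; the feature columns 'f0' and 'f1' are placeholders for the real skeleton point columns and are assumptions.

import numpy as np
import pandas as pd

skeleton_data = pd.DataFrame({
    'data_name': ['a1_s1_t1', 'a1_s1_t1'],
    'f0': [0.12, 0.15],
    'f1': [0.48, 0.51],
    'action': [1, 1],
})

inputs, targets = split_data_input_target(skeleton_data)
print(inputs.shape, targets.shape)  # (2, 2) (2,)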
def video_based_model_testing(test_skeleton_information: pd.DataFrame, current_model: sklearn):
'Tests performance of the currently trained model on the validation or testing sets, where the performance is\n evaluated per video / file, instead of evaluating per frame.\n\n Args:\n test_skeleton_information: Pandas dataframe which contains skeleton point information for all actions,\n subject_ids, and takes in the validation or testing sets.\n current_model: Scikit-learn model that is currently being trained and tested.\n\n Returns:\n A tuple contains the target and predicted action for each video in the validation / testing set.\n '
test_data_names = np.unique(test_skeleton_information['data_name'])
test_target_data = []
test_predicted_data = []
for i in range(len(test_data_names)):
current_data_name_skeleton_information = test_skeleton_information[(test_skeleton_information['data_name'] == test_data_names[i])]
(test_skeleton_input_data, test_skeleton_target_data) = split_data_input_target(current_data_name_skeleton_information)
test_skeleton_predicted_data = list(current_model.predict(test_skeleton_input_data))
test_target_data.append(max(current_data_name_skeleton_information['action']))
test_predicted_data.append(max(test_skeleton_predicted_data, key=test_skeleton_predicted_data.count))
return (np.array(test_target_data), np.array(test_predicted_data)) | 3,758,275,840,654,412,000 | Tests performance of the currently trained model on the validation or testing sets, where the performance is
evaluated per video / file, instead of evaluating per frame.
Args:
test_skeleton_information: Pandas dataframe which contains skeleton point information for all actions,
subject_ids, and takes in the validation or testing sets.
current_model: Scikit-learn model that is currently being trained and tested.
Returns:
A tuple containing the target and predicted actions for each video in the validation / testing set. | codes/model_training_testing.py | video_based_model_testing | preetham-ganesh/multi-sensor-human-activity-recognition | python | def video_based_model_testing(test_skeleton_information: pd.DataFrame, current_model: sklearn):
'Tests performance of the currently trained model on the validation or testing sets, where the performance is\n evaluated per video / file, instead of evaluating per frame.\n\n Args:\n test_skeleton_information: Pandas dataframe which contains skeleton point information for all actions,\n subject_ids, and takes in the validation or testing sets.\n current_model: Scikit-learn model that is currently being trained and tested.\n\n Returns:\n A tuple contains the target and predicted action for each video in the validation / testing set.\n '
test_data_names = np.unique(test_skeleton_information['data_name'])
test_target_data = []
test_predicted_data = []
for i in range(len(test_data_names)):
current_data_name_skeleton_information = test_skeleton_information[(test_skeleton_information['data_name'] == test_data_names[i])]
(test_skeleton_input_data, test_skeleton_target_data) = split_data_input_target(current_data_name_skeleton_information)
test_skeleton_predicted_data = list(current_model.predict(test_skeleton_input_data))
test_target_data.append(max(current_data_name_skeleton_information['action']))
test_predicted_data.append(max(test_skeleton_predicted_data, key=test_skeleton_predicted_data.count))
return (np.array(test_target_data), np.array(test_predicted_data)) |
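The per-video decision above is a majority vote over frame-level predictions (max with key=list.count). A standalone sketch of the same idea using collections.Counter; tie-breaking may differ slightly between the two.

from collections import Counter

frame_predictions = [3, 3, 5, 3, 5, 3]          # per-frame action labels for one video
video_prediction = Counter(frame_predictions).most_common(1)[0][0]
print(video_prediction)                          # 3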
def model_training_testing(train_skeleton_information: pd.DataFrame, validation_skeleton_information: pd.DataFrame, test_skeleton_information: pd.DataFrame, current_model_name: str, parameters: dict):
'Trains and validates model for the current model name and hyperparameters on the train_skeleton_informaiton and\n validation_skeleton_information.\n\n Args:\n train_skeleton_information: Pandas dataframe which contains skeleton point information for all actions,\n subject_ids, and takes in the Training set.\n validation_skeleton_information: Pandas dataframe which contains skeleton point information for all actions,\n subject_ids, and takes in the Validation set.\n test_skeleton_information: Pandas dataframe which contains skeleton point information for all actions,\n subject_ids, and takes in the Test set.\n current_model_name: Name of the model currently expected to be trained.\n parameters: Current parameter values used for training and validating the model.\n\n Returns:\n A tuple which contains the training metrics, validation metrics, & test metrics.\n '
if (current_model_name == 'support_vector_classifier'):
model = SVC(kernel=parameters['kernel'])
elif (current_model_name == 'decision_tree_classifier'):
model = DecisionTreeClassifier(criterion=parameters['criterion'], splitter=parameters['splitter'], max_depth=parameters['max_depth'])
elif (current_model_name == 'random_forest_classifier'):
model = RandomForestClassifier(n_estimators=parameters['n_estimators'], criterion=parameters['criterion'], max_depth=parameters['max_depth'])
elif (current_model_name == 'extra_trees_classifier'):
model = ExtraTreesClassifier(n_estimators=parameters['n_estimators'], criterion=parameters['criterion'], max_depth=parameters['max_depth'])
elif (current_model_name == 'gradient_boosting_classifier'):
model = GradientBoostingClassifier(n_estimators=parameters['n_estimators'], max_depth=parameters['max_depth'])
else:
model = GaussianNB()
(train_skeleton_input_data, train_skeleton_target_data) = split_data_input_target(train_skeleton_information)
model.fit(train_skeleton_input_data, train_skeleton_target_data)
(train_skeleton_target_data, train_skeleton_predicted_data) = video_based_model_testing(train_skeleton_information, model)
(validation_skeleton_target_data, validation_skeleton_predicted_data) = video_based_model_testing(validation_skeleton_information, model)
(test_skeleton_target_data, test_skeleton_predicted_data) = video_based_model_testing(test_skeleton_information, model)
train_metrics = calculate_metrics(train_skeleton_target_data, train_skeleton_predicted_data)
validation_metrics = calculate_metrics(validation_skeleton_target_data, validation_skeleton_predicted_data)
test_metrics = calculate_metrics(test_skeleton_target_data, test_skeleton_predicted_data)
return (train_metrics, validation_metrics, test_metrics) | 8,992,935,888,324,668,000 | Trains and validates the model for the current model name and hyperparameters on the train_skeleton_information and
validation_skeleton_information.
Args:
train_skeleton_information: Pandas dataframe which contains skeleton point information for all actions,
subject_ids, and takes in the Training set.
validation_skeleton_information: Pandas dataframe which contains skeleton point information for all actions,
subject_ids, and takes in the Validation set.
test_skeleton_information: Pandas dataframe which contains skeleton point information for all actions,
subject_ids, and takes in the Test set.
current_model_name: Name of the model currently expected to be trained.
parameters: Current parameter values used for training and validating the model.
Returns:
A tuple which contains the training metrics, validation metrics, & test metrics. | codes/model_training_testing.py | model_training_testing | preetham-ganesh/multi-sensor-human-activity-recognition | python | def model_training_testing(train_skeleton_information: pd.DataFrame, validation_skeleton_information: pd.DataFrame, test_skeleton_information: pd.DataFrame, current_model_name: str, parameters: dict):
'Trains and validates model for the current model name and hyperparameters on the train_skeleton_informaiton and\n validation_skeleton_information.\n\n Args:\n train_skeleton_information: Pandas dataframe which contains skeleton point information for all actions,\n subject_ids, and takes in the Training set.\n validation_skeleton_information: Pandas dataframe which contains skeleton point information for all actions,\n subject_ids, and takes in the Validation set.\n test_skeleton_information: Pandas dataframe which contains skeleton point information for all actions,\n subject_ids, and takes in the Test set.\n current_model_name: Name of the model currently expected to be trained.\n parameters: Current parameter values used for training and validating the model.\n\n Returns:\n A tuple which contains the training metrics, validation metrics, & test metrics.\n '
if (current_model_name == 'support_vector_classifier'):
model = SVC(kernel=parameters['kernel'])
elif (current_model_name == 'decision_tree_classifier'):
model = DecisionTreeClassifier(criterion=parameters['criterion'], splitter=parameters['splitter'], max_depth=parameters['max_depth'])
elif (current_model_name == 'random_forest_classifier'):
model = RandomForestClassifier(n_estimators=parameters['n_estimators'], criterion=parameters['criterion'], max_depth=parameters['max_depth'])
elif (current_model_name == 'extra_trees_classifier'):
model = ExtraTreesClassifier(n_estimators=parameters['n_estimators'], criterion=parameters['criterion'], max_depth=parameters['max_depth'])
elif (current_model_name == 'gradient_boosting_classifier'):
model = GradientBoostingClassifier(n_estimators=parameters['n_estimators'], max_depth=parameters['max_depth'])
else:
model = GaussianNB()
(train_skeleton_input_data, train_skeleton_target_data) = split_data_input_target(train_skeleton_information)
model.fit(train_skeleton_input_data, train_skeleton_target_data)
(train_skeleton_target_data, train_skeleton_predicted_data) = video_based_model_testing(train_skeleton_information, model)
(validation_skeleton_target_data, validation_skeleton_predicted_data) = video_based_model_testing(validation_skeleton_information, model)
(test_skeleton_target_data, test_skeleton_predicted_data) = video_based_model_testing(test_skeleton_information, model)
train_metrics = calculate_metrics(train_skeleton_target_data, train_skeleton_predicted_data)
validation_metrics = calculate_metrics(validation_skeleton_target_data, validation_skeleton_predicted_data)
test_metrics = calculate_metrics(test_skeleton_target_data, test_skeleton_predicted_data)
return (train_metrics, validation_metrics, test_metrics) |
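A hypothetical call tying the pieces together; the three dataframes are assumed to come from the project's data_combiner step, and the parameter values are illustrative.

parameters = {'criterion': 'gini', 'splitter': 'best', 'max_depth': 4}

train_metrics, validation_metrics, test_metrics = model_training_testing(
    train_skeleton_information,
    validation_skeleton_information,
    test_skeleton_information,
    'decision_tree_classifier',
    parameters,
)
print(validation_metrics['f1_score'])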
def per_combination_results_export(combination_name: str, data_split: str, metrics_dataframe: pd.DataFrame):
'Exports the metrics_dataframe into a CSV format to the mentioned data_split folder. If the folder does not exist,\n then the folder is created.\n\n Args:\n combination_name: Name of the current combination of modalities and skeleton pose model.\n data_split: Name of the split the subset of the dataset belongs to.\n metrics_dataframe: A dataframe containing the mean of all the metrics for all the hyperparameters & models.\n\n Returns:\n None.\n '
directory_path = '{}/{}'.format('../results/combination_results', combination_name)
if (not os.path.isdir(directory_path)):
os.mkdir(directory_path)
file_path = '{}/{}.csv'.format(directory_path, data_split)
metrics_dataframe.to_csv(file_path, index=False) | -7,363,266,707,070,975,000 | Exports the metrics_dataframe into a CSV format to the mentioned data_split folder. If the folder does not exist,
then the folder is created.
Args:
combination_name: Name of the current combination of modalities and skeleton pose model.
data_split: Name of the split the subset of the dataset belongs to.
metrics_dataframe: A dataframe containing the mean of all the metrics for all the hyperparameters & models.
Returns:
None. | codes/model_training_testing.py | per_combination_results_export | preetham-ganesh/multi-sensor-human-activity-recognition | python | def per_combination_results_export(combination_name: str, data_split: str, metrics_dataframe: pd.DataFrame):
'Exports the metrics_dataframe into a CSV format to the mentioned data_split folder. If the folder does not exist,\n then the folder is created.\n\n Args:\n combination_name: Name of the current combination of modalities and skeleton pose model.\n data_split: Name of the split the subset of the dataset belongs to.\n metrics_dataframe: A dataframe containing the mean of all the metrics for all the hyperparameters & models.\n\n Returns:\n None.\n '
directory_path = '{}/{}'.format('../results/combination_results', combination_name)
if (not os.path.isdir(directory_path)):
os.mkdir(directory_path)
file_path = '{}/{}.csv'.format(directory_path, data_split)
metrics_dataframe.to_csv(file_path, index=False) |
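If the parent '../results/combination_results' directory may not exist yet, os.makedirs with exist_ok=True is a more forgiving variant of the os.mkdir call above; a sketch only, with an illustrative combination name.

import os

directory_path = os.path.join('../results/combination_results', 'rgb_depth_coco')
os.makedirs(directory_path, exist_ok=True)   # creates missing parents, no error if it already exists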
def appends_parameter_metrics_combination(current_model_name: str, current_combination_name: str, current_split_metrics: dict, split_metrics_dataframe: pd.DataFrame):
'Appends the metrics for the current model and current parameter combination to the main dataframe.\n\n Args:\n current_model_name: Name of the model currently being trained.\n current_combination_name: Current combination of parameters used for training the model.\n current_split_metrics: Metrics for the current parameter combination for the model.\n split_metrics_dataframe: Pandas dataframe which contains metrics for the current combination of modalities.\n\n Returns:\n Updated version of the pandas dataframe which contains metrics for the current combination of modalities.\n '
current_split_metrics['model_names'] = current_model_name
current_split_metrics['parameters'] = current_combination_name
split_metrics_dataframe = split_metrics_dataframe.append(current_split_metrics, ignore_index=True)
return split_metrics_dataframe | 8,218,222,589,331,363,000 | Appends the metrics for the current model and current parameter combination to the main dataframe.
Args:
current_model_name: Name of the model currently being trained.
current_combination_name: Current combination of parameters used for training the model.
current_split_metrics: Metrics for the current parameter combination for the model.
split_metrics_dataframe: Pandas dataframe which contains metrics for the current combination of modalities.
Returns:
Updated version of the pandas dataframe which contains metrics for the current combination of modalities. | codes/model_training_testing.py | appends_parameter_metrics_combination | preetham-ganesh/multi-sensor-human-activity-recognition | python | def appends_parameter_metrics_combination(current_model_name: str, current_combination_name: str, current_split_metrics: dict, split_metrics_dataframe: pd.DataFrame):
'Appends the metrics for the current model and current parameter combination to the main dataframe.\n\n Args:\n current_model_name: Name of the model currently being trained.\n current_combination_name: Current combination of parameters used for training the model.\n current_split_metrics: Metrics for the current parameter combination for the model.\n split_metrics_dataframe: Pandas dataframe which contains metrics for the current combination of modalities.\n\n Returns:\n Updated version of the pandas dataframe which contains metrics for the current combination of modalities.\n '
current_split_metrics['model_names'] = current_model_name
current_split_metrics['parameters'] = current_combination_name
split_metrics_dataframe = split_metrics_dataframe.append(current_split_metrics, ignore_index=True)
return split_metrics_dataframe |
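DataFrame.append, used above, was deprecated in pandas 1.4 and removed in pandas 2.0; a small sketch of an equivalent row append using pd.concat (the helper name is an assumption, not repository code).

import pandas as pd

def append_row(dataframe: pd.DataFrame, row: dict) -> pd.DataFrame:
    # Equivalent to dataframe.append(row, ignore_index=True) on older pandas versions.
    return pd.concat([dataframe, pd.DataFrame([row])], ignore_index=True)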
def per_combination_model_training_testing(train_subject_ids: list, validation_subject_ids: list, test_subject_ids: list, n_actions: int, n_takes: int, current_combination_modalities: list, skeleton_pose_model: str, model_names: list):
'Combines skeleton point information based on modality combination, and subject id group. Trains, validates, and\n tests the list of classifier models. Calculates metrics for each data split, model and parameter combination.\n\n Args:\n train_subject_ids: List of subject ids in the training set.\n validation_subject_ids: List of subject ids in the validation set.\n test_subject_ids: List of subject ids in the testing set.\n n_actions: Total number of actions in the original dataset.\n n_takes: Total number of takes in the original dataset.\n current_combination_modalities: Current combination of modalities which will be used to import and combine\n the dataset.\n skeleton_pose_model: Name of the model currently used for extracting skeleton model.\n model_names: List of ML classifier model names which will used creating the objects.\n\n Returns:\n None.\n '
train_skeleton_information = data_combiner(n_actions, train_subject_ids, n_takes, current_combination_modalities, skeleton_pose_model)
validation_skeleton_information = data_combiner(n_actions, validation_subject_ids, n_takes, current_combination_modalities, skeleton_pose_model)
test_skeleton_information = data_combiner(n_actions, test_subject_ids, n_takes, current_combination_modalities, skeleton_pose_model)
metrics_features = ['accuracy_score', 'balanced_accuracy_score', 'precision_score', 'recall_score', 'f1_score']
train_models_parameters_metrics = pd.DataFrame(columns=(['model_names', 'parameters'] + metrics_features))
validation_models_parameters_metrics = pd.DataFrame(columns=(['model_names', 'parameters'] + metrics_features))
test_models_parameters_metrics = pd.DataFrame(columns=(['model_names', 'parameters'] + metrics_features))
combination_name = '_'.join((current_combination_modalities + [skeleton_pose_model]))
for i in range(len(model_names)):
parameters = retrieve_hyperparameters(model_names[i])
parameters_grid = ParameterGrid(parameters)
for j in range(len(parameters_grid)):
current_parameters_grid_name = ', '.join(['{}={}'.format(k, parameters_grid[j][k]) for k in parameters_grid[j].keys()])
(training_metrics, validation_metrics, test_metrics) = model_training_testing(train_skeleton_information, validation_skeleton_information, test_skeleton_information, model_names[i], parameters_grid[j])
train_models_parameters_metrics = appends_parameter_metrics_combination(model_names[i], current_parameters_grid_name, training_metrics, train_models_parameters_metrics)
validation_models_parameters_metrics = appends_parameter_metrics_combination(model_names[i], current_parameters_grid_name, validation_metrics, validation_models_parameters_metrics)
test_models_parameters_metrics = appends_parameter_metrics_combination(model_names[i], current_parameters_grid_name, test_metrics, test_models_parameters_metrics)
if (model_names[i] != 'gaussian_naive_bayes'):
print('modality_combination={}, model={}, {} completed successfully.'.format(combination_name, model_names[i], current_parameters_grid_name))
else:
print('modality_combination={}, model={} completed successfully.'.format(combination_name, model_names[i]))
per_combination_results_export('_'.join((current_combination_modalities + [skeleton_pose_model])), 'train_metrics', train_models_parameters_metrics)
per_combination_results_export('_'.join((current_combination_modalities + [skeleton_pose_model])), 'validation_metrics', validation_models_parameters_metrics)
per_combination_results_export('_'.join((current_combination_modalities + [skeleton_pose_model])), 'test_metrics', test_models_parameters_metrics) | -7,157,611,953,615,802,000 | Combines skeleton point information based on modality combination, and subject id group. Trains, validates, and
tests the list of classifier models. Calculates metrics for each data split, model and parameter combination.
Args:
train_subject_ids: List of subject ids in the training set.
validation_subject_ids: List of subject ids in the validation set.
test_subject_ids: List of subject ids in the testing set.
n_actions: Total number of actions in the original dataset.
n_takes: Total number of takes in the original dataset.
current_combination_modalities: Current combination of modalities which will be used to import and combine
the dataset.
skeleton_pose_model: Name of the model currently used for extracting skeleton model.
model_names: List of ML classifier model names which will be used for creating the objects.
Returns:
None. | codes/model_training_testing.py | per_combination_model_training_testing | preetham-ganesh/multi-sensor-human-activity-recognition | python | def per_combination_model_training_testing(train_subject_ids: list, validation_subject_ids: list, test_subject_ids: list, n_actions: int, n_takes: int, current_combination_modalities: list, skeleton_pose_model: str, model_names: list):
'Combines skeleton point information based on modality combination, and subject id group. Trains, validates, and\n tests the list of classifier models. Calculates metrics for each data split, model and parameter combination.\n\n Args:\n train_subject_ids: List of subject ids in the training set.\n validation_subject_ids: List of subject ids in the validation set.\n test_subject_ids: List of subject ids in the testing set.\n n_actions: Total number of actions in the original dataset.\n n_takes: Total number of takes in the original dataset.\n current_combination_modalities: Current combination of modalities which will be used to import and combine\n the dataset.\n skeleton_pose_model: Name of the model currently used for extracting skeleton model.\n model_names: List of ML classifier model names which will used creating the objects.\n\n Returns:\n None.\n '
train_skeleton_information = data_combiner(n_actions, train_subject_ids, n_takes, current_combination_modalities, skeleton_pose_model)
validation_skeleton_information = data_combiner(n_actions, validation_subject_ids, n_takes, current_combination_modalities, skeleton_pose_model)
test_skeleton_information = data_combiner(n_actions, test_subject_ids, n_takes, current_combination_modalities, skeleton_pose_model)
metrics_features = ['accuracy_score', 'balanced_accuracy_score', 'precision_score', 'recall_score', 'f1_score']
train_models_parameters_metrics = pd.DataFrame(columns=(['model_names', 'parameters'] + metrics_features))
validation_models_parameters_metrics = pd.DataFrame(columns=(['model_names', 'parameters'] + metrics_features))
test_models_parameters_metrics = pd.DataFrame(columns=(['model_names', 'parameters'] + metrics_features))
combination_name = '_'.join((current_combination_modalities + [skeleton_pose_model]))
for i in range(len(model_names)):
parameters = retrieve_hyperparameters(model_names[i])
parameters_grid = ParameterGrid(parameters)
for j in range(len(parameters_grid)):
current_parameters_grid_name = ', '.join(['{}={}'.format(k, parameters_grid[j][k]) for k in parameters_grid[j].keys()])
(training_metrics, validation_metrics, test_metrics) = model_training_testing(train_skeleton_information, validation_skeleton_information, test_skeleton_information, model_names[i], parameters_grid[j])
train_models_parameters_metrics = appends_parameter_metrics_combination(model_names[i], current_parameters_grid_name, training_metrics, train_models_parameters_metrics)
validation_models_parameters_metrics = appends_parameter_metrics_combination(model_names[i], current_parameters_grid_name, validation_metrics, validation_models_parameters_metrics)
test_models_parameters_metrics = appends_parameter_metrics_combination(model_names[i], current_parameters_grid_name, test_metrics, test_models_parameters_metrics)
if (model_names[i] != 'gaussian_naive_bayes'):
print('modality_combination={}, model={}, {} completed successfully.'.format(combination_name, model_names[i], current_parameters_grid_name))
else:
print('modality_combination={}, model={} completed successfully.'.format(combination_name, model_names[i]))
per_combination_results_export('_'.join((current_combination_modalities + [skeleton_pose_model])), 'train_metrics', train_models_parameters_metrics)
per_combination_results_export('_'.join((current_combination_modalities + [skeleton_pose_model])), 'validation_metrics', validation_models_parameters_metrics)
per_combination_results_export('_'.join((current_combination_modalities + [skeleton_pose_model])), 'test_metrics', test_models_parameters_metrics) |
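A hypothetical driver call for the function above; the subject id split, action/take counts, modality names and model list are illustrative values, not taken from the repository.

per_combination_model_training_testing(
    train_subject_ids=[1, 2, 3, 4, 5, 6],
    validation_subject_ids=[7],
    test_subject_ids=[8],
    n_actions=27,
    n_takes=4,
    current_combination_modalities=['rgb', 'depth'],
    skeleton_pose_model='coco',
    model_names=['gaussian_naive_bayes', 'decision_tree_classifier'],
)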
def __init__(self, multi_scale=False, multi_image_sizes=(320, 352, 384, 416, 448, 480, 512, 544, 576, 608), misc_effect=None, visual_effect=None, batch_size=1, group_method='ratio', shuffle_groups=True, input_size=512, max_objects=100):
"\n Initialize Generator object.\n\n Args:\n batch_size: The size of the batches to generate.\n group_method: Determines how images are grouped together (defaults to 'ratio', one of ('none', 'random', 'ratio')).\n shuffle_groups: If True, shuffles the groups each epoch.\n input_size:\n max_objects:\n "
self.misc_effect = misc_effect
self.visual_effect = visual_effect
self.batch_size = int(batch_size)
self.group_method = group_method
self.shuffle_groups = shuffle_groups
self.input_size = input_size
self.output_size = (self.input_size // 4)
self.max_objects = max_objects
self.groups = None
self.multi_scale = multi_scale
self.multi_image_sizes = multi_image_sizes
self.current_index = 0
self.group_images()
if self.shuffle_groups:
random.shuffle(self.groups) | 1,858,173,405,841,341,700 | Initialize Generator object.
Args:
batch_size: The size of the batches to generate.
group_method: Determines how images are grouped together (defaults to 'ratio', one of ('none', 'random', 'ratio')).
shuffle_groups: If True, shuffles the groups each epoch.
input_size: Side length (in pixels) of the square input image fed to the network; the heatmap output size is input_size // 4.
max_objects: Maximum number of annotated objects per image, used to size the regression target arrays.
"\n Initialize Generator object.\n\n Args:\n batch_size: The size of the batches to generate.\n group_method: Determines how images are grouped together (defaults to 'ratio', one of ('none', 'random', 'ratio')).\n shuffle_groups: If True, shuffles the groups each epoch.\n input_size:\n max_objects:\n "
self.misc_effect = misc_effect
self.visual_effect = visual_effect
self.batch_size = int(batch_size)
self.group_method = group_method
self.shuffle_groups = shuffle_groups
self.input_size = input_size
self.output_size = (self.input_size // 4)
self.max_objects = max_objects
self.groups = None
self.multi_scale = multi_scale
self.multi_image_sizes = multi_image_sizes
self.current_index = 0
self.group_images()
if self.shuffle_groups:
random.shuffle(self.groups) |
def size(self):
'\n Size of the dataset.\n '
raise NotImplementedError('size method not implemented') | 2,519,970,720,400,613,400 | Size of the dataset. | generators/common.py | size | lbcsept/keras-CenterNet | python | def size(self):
'\n \n '
raise NotImplementedError('size method not implemented') |
def num_classes(self):
'\n Number of classes in the dataset.\n '
raise NotImplementedError('num_classes method not implemented') | 2,245,586,942,049,278,500 | Number of classes in the dataset. | generators/common.py | num_classes | lbcsept/keras-CenterNet | python | def num_classes(self):
'\n \n '
raise NotImplementedError('num_classes method not implemented') |
def has_label(self, label):
'\n Returns True if label is a known label.\n '
raise NotImplementedError('has_label method not implemented') | 8,231,604,603,183,398,000 | Returns True if label is a known label. | generators/common.py | has_label | lbcsept/keras-CenterNet | python | def has_label(self, label):
'\n \n '
raise NotImplementedError('has_label method not implemented') |
def has_name(self, name):
'\n Returns True if name is a known class.\n '
raise NotImplementedError('has_name method not implemented') | 5,509,816,958,451,983,000 | Returns True if name is a known class. | generators/common.py | has_name | lbcsept/keras-CenterNet | python | def has_name(self, name):
'\n \n '
raise NotImplementedError('has_name method not implemented') |
def name_to_label(self, name):
'\n Map name to label.\n '
raise NotImplementedError('name_to_label method not implemented') | -3,816,862,996,482,635,300 | Map name to label. | generators/common.py | name_to_label | lbcsept/keras-CenterNet | python | def name_to_label(self, name):
'\n \n '
raise NotImplementedError('name_to_label method not implemented') |
def label_to_name(self, label):
'\n Map label to name.\n '
raise NotImplementedError('label_to_name method not implemented') | 5,471,730,362,505,122,000 | Map label to name. | generators/common.py | label_to_name | lbcsept/keras-CenterNet | python | def label_to_name(self, label):
'\n \n '
raise NotImplementedError('label_to_name method not implemented') |
def image_aspect_ratio(self, image_index):
'\n Compute the aspect ratio for an image with image_index.\n '
raise NotImplementedError('image_aspect_ratio method not implemented') | 5,160,404,832,456,020,000 | Compute the aspect ratio for an image with image_index. | generators/common.py | image_aspect_ratio | lbcsept/keras-CenterNet | python | def image_aspect_ratio(self, image_index):
'\n \n '
raise NotImplementedError('image_aspect_ratio method not implemented') |
def load_image(self, image_index):
'\n Load an image at the image_index.\n '
raise NotImplementedError('load_image method not implemented') | -7,955,758,498,265,859,000 | Load an image at the image_index. | generators/common.py | load_image | lbcsept/keras-CenterNet | python | def load_image(self, image_index):
'\n \n '
raise NotImplementedError('load_image method not implemented') |
def load_annotations(self, image_index):
'\n Load annotations for an image_index.\n '
raise NotImplementedError('load_annotations method not implemented') | -7,710,779,890,866,678,000 | Load annotations for an image_index. | generators/common.py | load_annotations | lbcsept/keras-CenterNet | python | def load_annotations(self, image_index):
'\n \n '
raise NotImplementedError('load_annotations method not implemented') |
def load_annotations_group(self, group):
'\n Load annotations for all images in group.\n '
annotations_group = [self.load_annotations(image_index) for image_index in group]
for annotations in annotations_group:
assert isinstance(annotations, dict), "'load_annotations' should return a list of dictionaries, received: {}".format(type(annotations))
assert ('labels' in annotations), "'load_annotations' should return a list of dictionaries that contain 'labels' and 'bboxes'."
assert ('bboxes' in annotations), "'load_annotations' should return a list of dictionaries that contain 'labels' and 'bboxes'."
return annotations_group | 1,904,131,570,393,643,300 | Load annotations for all images in group. | generators/common.py | load_annotations_group | lbcsept/keras-CenterNet | python | def load_annotations_group(self, group):
'\n \n '
annotations_group = [self.load_annotations(image_index) for image_index in group]
for annotations in annotations_group:
assert isinstance(annotations, dict), "'load_annotations' should return a list of dictionaries, received: {}".format(type(annotations))
assert ('labels' in annotations), "'load_annotations' should return a list of dictionaries that contain 'labels' and 'bboxes'."
assert ('bboxes' in annotations), "'load_annotations' should return a list of dictionaries that contain 'labels' and 'bboxes'."
return annotations_group |
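A sketch of the annotation dictionary that load_annotations() is expected to return for one image, inferred from the assertions above; boxes are (x1, y1, x2, y2) in pixels and the values are illustrative.

import numpy as np

annotations = {
    'labels': np.array([0, 2]),
    'bboxes': np.array([[10.0, 20.0, 110.0, 220.0],
                        [50.0, 60.0,  90.0, 140.0]]),  # x1, y1, x2, y2
}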
def filter_annotations(self, image_group, annotations_group, group):
'\n Filter annotations by removing those that are outside of the image bounds or whose width/height < 0.\n '
for (index, (image, annotations)) in enumerate(zip(image_group, annotations_group)):
invalid_indices = np.where(((((((((annotations['bboxes'][:, 2] <= annotations['bboxes'][:, 0]) | (annotations['bboxes'][:, 3] <= annotations['bboxes'][:, 1])) | (annotations['bboxes'][:, 0] < 0)) | (annotations['bboxes'][:, 1] < 0)) | (annotations['bboxes'][:, 2] <= 0)) | (annotations['bboxes'][:, 3] <= 0)) | (annotations['bboxes'][:, 2] > image.shape[1])) | (annotations['bboxes'][:, 3] > image.shape[0])))[0]
if len(invalid_indices):
warnings.warn('Image with id {} (shape {}) contains the following invalid boxes: {}.'.format(group[index], image.shape, annotations['bboxes'][invalid_indices, :]))
for k in annotations_group[index].keys():
annotations_group[index][k] = np.delete(annotations[k], invalid_indices, axis=0)
if (annotations['bboxes'].shape[0] == 0):
warnings.warn('Image with id {} (shape {}) contains no valid boxes before transform'.format(group[index], image.shape))
return (image_group, annotations_group) | -5,141,735,004,164,665,000 | Filter annotations by removing those that are outside of the image bounds or whose width/height < 0. | generators/common.py | filter_annotations | lbcsept/keras-CenterNet | python | def filter_annotations(self, image_group, annotations_group, group):
'\n \n '
for (index, (image, annotations)) in enumerate(zip(image_group, annotations_group)):
invalid_indices = np.where(((((((((annotations['bboxes'][:, 2] <= annotations['bboxes'][:, 0]) | (annotations['bboxes'][:, 3] <= annotations['bboxes'][:, 1])) | (annotations['bboxes'][:, 0] < 0)) | (annotations['bboxes'][:, 1] < 0)) | (annotations['bboxes'][:, 2] <= 0)) | (annotations['bboxes'][:, 3] <= 0)) | (annotations['bboxes'][:, 2] > image.shape[1])) | (annotations['bboxes'][:, 3] > image.shape[0])))[0]
if len(invalid_indices):
warnings.warn('Image with id {} (shape {}) contains the following invalid boxes: {}.'.format(group[index], image.shape, annotations['bboxes'][invalid_indices, :]))
for k in annotations_group[index].keys():
annotations_group[index][k] = np.delete(annotations[k], invalid_indices, axis=0)
if (annotations['bboxes'].shape[0] == 0):
warnings.warn('Image with id {} (shape {}) contains no valid boxes before transform'.format(group[index], image.shape))
return (image_group, annotations_group) |
def clip_transformed_annotations(self, image_group, annotations_group, group):
'\n Filter annotations by removing those that are outside of the image bounds or whose width/height < 0.\n '
filtered_image_group = []
filtered_annotations_group = []
for (index, (image, annotations)) in enumerate(zip(image_group, annotations_group)):
image_height = image.shape[0]
image_width = image.shape[1]
annotations['bboxes'][:, 0] = np.clip(annotations['bboxes'][:, 0], 0, (image_width - 2))
annotations['bboxes'][:, 1] = np.clip(annotations['bboxes'][:, 1], 0, (image_height - 2))
annotations['bboxes'][:, 2] = np.clip(annotations['bboxes'][:, 2], 1, (image_width - 1))
annotations['bboxes'][:, 3] = np.clip(annotations['bboxes'][:, 3], 1, (image_height - 1))
small_indices = np.where((((annotations['bboxes'][:, 2] - annotations['bboxes'][:, 0]) < 10) | ((annotations['bboxes'][:, 3] - annotations['bboxes'][:, 1]) < 10)))[0]
if len(small_indices):
for k in annotations_group[index].keys():
annotations_group[index][k] = np.delete(annotations[k], small_indices, axis=0)
if (annotations_group[index]['bboxes'].shape[0] != 0):
filtered_image_group.append(image)
filtered_annotations_group.append(annotations_group[index])
else:
warnings.warn('Image with id {} (shape {}) contains no valid boxes after transform'.format(group[index], image.shape))
return (filtered_image_group, filtered_annotations_group) | 4,534,439,093,873,950,700 | Filter annotations by removing those that are outside of the image bounds or whose width/height < 0. | generators/common.py | clip_transformed_annotations | lbcsept/keras-CenterNet | python | def clip_transformed_annotations(self, image_group, annotations_group, group):
'\n \n '
filtered_image_group = []
filtered_annotations_group = []
for (index, (image, annotations)) in enumerate(zip(image_group, annotations_group)):
image_height = image.shape[0]
image_width = image.shape[1]
annotations['bboxes'][:, 0] = np.clip(annotations['bboxes'][:, 0], 0, (image_width - 2))
annotations['bboxes'][:, 1] = np.clip(annotations['bboxes'][:, 1], 0, (image_height - 2))
annotations['bboxes'][:, 2] = np.clip(annotations['bboxes'][:, 2], 1, (image_width - 1))
annotations['bboxes'][:, 3] = np.clip(annotations['bboxes'][:, 3], 1, (image_height - 1))
small_indices = np.where((((annotations['bboxes'][:, 2] - annotations['bboxes'][:, 0]) < 10) | ((annotations['bboxes'][:, 3] - annotations['bboxes'][:, 1]) < 10)))[0]
if len(small_indices):
for k in annotations_group[index].keys():
annotations_group[index][k] = np.delete(annotations[k], small_indices, axis=0)
if (annotations_group[index]['bboxes'].shape[0] != 0):
filtered_image_group.append(image)
filtered_annotations_group.append(annotations_group[index])
else:
warnings.warn('Image with id {} (shape {}) contains no valid boxes after transform'.format(group[index], image.shape))
return (filtered_image_group, filtered_annotations_group) |
def load_image_group(self, group):
'\n Load images for all images in a group.\n '
return [self.load_image(image_index) for image_index in group] | -208,212,597,319,730,600 | Load images for all images in a group. | generators/common.py | load_image_group | lbcsept/keras-CenterNet | python | def load_image_group(self, group):
'\n \n '
return [self.load_image(image_index) for image_index in group] |
def random_visual_effect_group_entry(self, image, annotations):
'\n Randomly transforms image and annotation.\n '
image = self.visual_effect(image)
return (image, annotations) | -7,949,354,188,122,564,000 | Randomly transforms image and annotation. | generators/common.py | random_visual_effect_group_entry | lbcsept/keras-CenterNet | python | def random_visual_effect_group_entry(self, image, annotations):
'\n \n '
image = self.visual_effect(image)
return (image, annotations) |
def random_visual_effect_group(self, image_group, annotations_group):
'\n Randomly apply visual effect on each image.\n '
assert (len(image_group) == len(annotations_group))
if (self.visual_effect is None):
return (image_group, annotations_group)
for index in range(len(image_group)):
(image_group[index], annotations_group[index]) = self.random_visual_effect_group_entry(image_group[index], annotations_group[index])
return (image_group, annotations_group) | 6,606,122,371,543,051,000 | Randomly apply visual effect on each image. | generators/common.py | random_visual_effect_group | lbcsept/keras-CenterNet | python | def random_visual_effect_group(self, image_group, annotations_group):
'\n \n '
assert (len(image_group) == len(annotations_group))
if (self.visual_effect is None):
return (image_group, annotations_group)
for index in range(len(image_group)):
(image_group[index], annotations_group[index]) = self.random_visual_effect_group_entry(image_group[index], annotations_group[index])
return (image_group, annotations_group) |
def random_transform_group_entry(self, image, annotations, transform=None):
'\n Randomly transforms image and annotation.\n '
if ((transform is not None) or self.transform_generator):
if (transform is None):
transform = adjust_transform_for_image(next(self.transform_generator), image, self.transform_parameters.relative_translation)
image = apply_transform(transform, image, self.transform_parameters)
annotations['bboxes'] = annotations['bboxes'].copy()
for index in range(annotations['bboxes'].shape[0]):
annotations['bboxes'][index, :] = transform_aabb(transform, annotations['bboxes'][index, :])
return (image, annotations) | 3,619,452,415,224,716,000 | Randomly transforms image and annotation. | generators/common.py | random_transform_group_entry | lbcsept/keras-CenterNet | python | def random_transform_group_entry(self, image, annotations, transform=None):
'\n \n '
if ((transform is not None) or self.transform_generator):
if (transform is None):
transform = adjust_transform_for_image(next(self.transform_generator), image, self.transform_parameters.relative_translation)
image = apply_transform(transform, image, self.transform_parameters)
annotations['bboxes'] = annotations['bboxes'].copy()
for index in range(annotations['bboxes'].shape[0]):
annotations['bboxes'][index, :] = transform_aabb(transform, annotations['bboxes'][index, :])
return (image, annotations) |
def random_transform_group(self, image_group, annotations_group):
'\n Randomly transforms each image and its annotations.\n '
assert (len(image_group) == len(annotations_group))
for index in range(len(image_group)):
(image_group[index], annotations_group[index]) = self.random_transform_group_entry(image_group[index], annotations_group[index])
return (image_group, annotations_group) | -6,856,956,971,389,891,000 | Randomly transforms each image and its annotations. | generators/common.py | random_transform_group | lbcsept/keras-CenterNet | python | def random_transform_group(self, image_group, annotations_group):
'\n \n '
assert (len(image_group) == len(annotations_group))
for index in range(len(image_group)):
(image_group[index], annotations_group[index]) = self.random_transform_group_entry(image_group[index], annotations_group[index])
return (image_group, annotations_group) |
def random_misc_group_entry(self, image, annotations):
'\n Randomly transforms image and annotation.\n '
assert (annotations['bboxes'].shape[0] != 0)
(image, boxes) = self.misc_effect(image, annotations['bboxes'])
annotations['bboxes'] = boxes
return (image, annotations) | -9,044,219,135,588,454,000 | Randomly transforms image and annotation. | generators/common.py | random_misc_group_entry | lbcsept/keras-CenterNet | python | def random_misc_group_entry(self, image, annotations):
'\n \n '
assert (annotations['bboxes'].shape[0] != 0)
(image, boxes) = self.misc_effect(image, annotations['bboxes'])
annotations['bboxes'] = boxes
return (image, annotations) |
def random_misc_group(self, image_group, annotations_group):
'\n Randomly transforms each image and its annotations.\n '
assert (len(image_group) == len(annotations_group))
if (self.misc_effect is None):
return (image_group, annotations_group)
for index in range(len(image_group)):
(image_group[index], annotations_group[index]) = self.random_misc_group_entry(image_group[index], annotations_group[index])
return (image_group, annotations_group) | 3,756,365,868,291,788,000 | Randomly transforms each image and its annotations. | generators/common.py | random_misc_group | lbcsept/keras-CenterNet | python | def random_misc_group(self, image_group, annotations_group):
'\n \n '
assert (len(image_group) == len(annotations_group))
if (self.misc_effect is None):
return (image_group, annotations_group)
for index in range(len(image_group)):
(image_group[index], annotations_group[index]) = self.random_misc_group_entry(image_group[index], annotations_group[index])
return (image_group, annotations_group) |
def preprocess_group_entry(self, image, annotations):
'\n Preprocess image and its annotations.\n '
(image, scale, offset_h, offset_w) = self.preprocess_image(image)
annotations['bboxes'] *= scale
annotations['bboxes'][:, [0, 2]] += offset_w
annotations['bboxes'][:, [1, 3]] += offset_h
return (image, annotations) | -2,648,293,636,791,352,300 | Preprocess image and its annotations. | generators/common.py | preprocess_group_entry | lbcsept/keras-CenterNet | python | def preprocess_group_entry(self, image, annotations):
'\n \n '
(image, scale, offset_h, offset_w) = self.preprocess_image(image)
annotations['bboxes'] *= scale
annotations['bboxes'][:, [0, 2]] += offset_w
annotations['bboxes'][:, [1, 3]] += offset_h
return (image, annotations) |
def preprocess_group(self, image_group, annotations_group):
'\n Preprocess each image and its annotations in its group.\n '
assert (len(image_group) == len(annotations_group))
for index in range(len(image_group)):
(image_group[index], annotations_group[index]) = self.preprocess_group_entry(image_group[index], annotations_group[index])
return (image_group, annotations_group) | -3,169,902,642,537,334,300 | Preprocess each image and its annotations in its group. | generators/common.py | preprocess_group | lbcsept/keras-CenterNet | python | def preprocess_group(self, image_group, annotations_group):
'\n \n '
assert (len(image_group) == len(annotations_group))
for index in range(len(image_group)):
(image_group[index], annotations_group[index]) = self.preprocess_group_entry(image_group[index], annotations_group[index])
return (image_group, annotations_group) |
def group_images(self):
'\n Order the images according to self.order and makes groups of self.batch_size.\n '
order = list(range(self.size()))
if (self.group_method == 'random'):
random.shuffle(order)
elif (self.group_method == 'ratio'):
order.sort(key=(lambda x: self.image_aspect_ratio(x)))
self.groups = [[order[(x % len(order))] for x in range(i, (i + self.batch_size))] for i in range(0, len(order), self.batch_size)] | -2,192,540,384,019,374,800 | Order the images according to self.order and makes groups of self.batch_size. | generators/common.py | group_images | lbcsept/keras-CenterNet | python | def group_images(self):
'\n \n '
order = list(range(self.size()))
if (self.group_method == 'random'):
random.shuffle(order)
elif (self.group_method == 'ratio'):
order.sort(key=(lambda x: self.image_aspect_ratio(x)))
self.groups = [[order[(x % len(order))] for x in range(i, (i + self.batch_size))] for i in range(0, len(order), self.batch_size)] |
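A simplified sketch of the 'ratio' grouping above: image indices are sorted by aspect ratio and chunked into batches (the real method also wraps indices so the final group is always full); the aspect ratios here are made up.

aspect_ratios = {0: 1.50, 1: 0.75, 2: 1.33, 3: 1.00}
batch_size = 2

order = sorted(aspect_ratios, key=aspect_ratios.get)                          # [1, 3, 2, 0]
groups = [order[i:i + batch_size] for i in range(0, len(order), batch_size)]
print(groups)                                                                 # [[1, 3], [2, 0]]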
def compute_inputs(self, image_group, annotations_group):
'\n Compute inputs for the network using an image_group.\n '
batch_images = np.zeros((len(image_group), self.input_size, self.input_size, 3), dtype=np.float32)
batch_hms = np.zeros((len(image_group), self.output_size, self.output_size, self.num_classes()), dtype=np.float32)
batch_hms_2 = np.zeros((len(image_group), self.output_size, self.output_size, self.num_classes()), dtype=np.float32)
batch_whs = np.zeros((len(image_group), self.max_objects, 2), dtype=np.float32)
batch_regs = np.zeros((len(image_group), self.max_objects, 2), dtype=np.float32)
batch_reg_masks = np.zeros((len(image_group), self.max_objects), dtype=np.float32)
batch_indices = np.zeros((len(image_group), self.max_objects), dtype=np.float32)
for (b, (image, annotations)) in enumerate(zip(image_group, annotations_group)):
c = np.array([(image.shape[1] / 2.0), (image.shape[0] / 2.0)], dtype=np.float32)
s = (max(image.shape[0], image.shape[1]) * 1.0)
trans_input = get_affine_transform(c, s, self.input_size)
image = self.preprocess_image(image, c, s, tgt_w=self.input_size, tgt_h=self.input_size)
batch_images[b] = image
bboxes = annotations['bboxes']
assert (bboxes.shape[0] != 0)
class_ids = annotations['labels']
assert (class_ids.shape[0] != 0)
trans_output = get_affine_transform(c, s, self.output_size)
for i in range(bboxes.shape[0]):
bbox = bboxes[i].copy()
cls_id = class_ids[i]
bbox[:2] = affine_transform(bbox[:2], trans_output)
bbox[2:] = affine_transform(bbox[2:], trans_output)
bbox[[0, 2]] = np.clip(bbox[[0, 2]], 0, (self.output_size - 1))
bbox[[1, 3]] = np.clip(bbox[[1, 3]], 0, (self.output_size - 1))
(h, w) = ((bbox[3] - bbox[1]), (bbox[2] - bbox[0]))
if ((h > 0) and (w > 0)):
(radius_h, radius_w) = gaussian_radius((math.ceil(h), math.ceil(w)))
radius_h = max(0, int(radius_h))
radius_w = max(0, int(radius_w))
radius = gaussian_radius_2((math.ceil(h), math.ceil(w)))
radius = max(0, int(radius))
ct = np.array([((bbox[0] + bbox[2]) / 2), ((bbox[1] + bbox[3]) / 2)], dtype=np.float32)
ct_int = ct.astype(np.int32)
draw_gaussian(batch_hms[b, :, :, cls_id], ct_int, radius_h, radius_w)
draw_gaussian_2(batch_hms_2[b, :, :, cls_id], ct_int, radius)
batch_whs[(b, i)] = ((1.0 * w), (1.0 * h))
batch_indices[(b, i)] = ((ct_int[1] * self.output_size) + ct_int[0])
batch_regs[(b, i)] = (ct - ct_int)
batch_reg_masks[(b, i)] = 1
return [batch_images, batch_hms_2, batch_whs, batch_regs, batch_reg_masks, batch_indices] | -7,551,723,626,296,321,000 | Compute inputs for the network using an image_group. | generators/common.py | compute_inputs | lbcsept/keras-CenterNet | python | def compute_inputs(self, image_group, annotations_group):
'\n \n '
batch_images = np.zeros((len(image_group), self.input_size, self.input_size, 3), dtype=np.float32)
batch_hms = np.zeros((len(image_group), self.output_size, self.output_size, self.num_classes()), dtype=np.float32)
batch_hms_2 = np.zeros((len(image_group), self.output_size, self.output_size, self.num_classes()), dtype=np.float32)
batch_whs = np.zeros((len(image_group), self.max_objects, 2), dtype=np.float32)
batch_regs = np.zeros((len(image_group), self.max_objects, 2), dtype=np.float32)
batch_reg_masks = np.zeros((len(image_group), self.max_objects), dtype=np.float32)
batch_indices = np.zeros((len(image_group), self.max_objects), dtype=np.float32)
for (b, (image, annotations)) in enumerate(zip(image_group, annotations_group)):
c = np.array([(image.shape[1] / 2.0), (image.shape[0] / 2.0)], dtype=np.float32)
s = (max(image.shape[0], image.shape[1]) * 1.0)
trans_input = get_affine_transform(c, s, self.input_size)
image = self.preprocess_image(image, c, s, tgt_w=self.input_size, tgt_h=self.input_size)
batch_images[b] = image
bboxes = annotations['bboxes']
assert (bboxes.shape[0] != 0)
class_ids = annotations['labels']
assert (class_ids.shape[0] != 0)
trans_output = get_affine_transform(c, s, self.output_size)
for i in range(bboxes.shape[0]):
bbox = bboxes[i].copy()
cls_id = class_ids[i]
bbox[:2] = affine_transform(bbox[:2], trans_output)
bbox[2:] = affine_transform(bbox[2:], trans_output)
bbox[[0, 2]] = np.clip(bbox[[0, 2]], 0, (self.output_size - 1))
bbox[[1, 3]] = np.clip(bbox[[1, 3]], 0, (self.output_size - 1))
(h, w) = ((bbox[3] - bbox[1]), (bbox[2] - bbox[0]))
if ((h > 0) and (w > 0)):
(radius_h, radius_w) = gaussian_radius((math.ceil(h), math.ceil(w)))
radius_h = max(0, int(radius_h))
radius_w = max(0, int(radius_w))
radius = gaussian_radius_2((math.ceil(h), math.ceil(w)))
radius = max(0, int(radius))
ct = np.array([((bbox[0] + bbox[2]) / 2), ((bbox[1] + bbox[3]) / 2)], dtype=np.float32)
ct_int = ct.astype(np.int32)
draw_gaussian(batch_hms[b, :, :, cls_id], ct_int, radius_h, radius_w)
draw_gaussian_2(batch_hms_2[b, :, :, cls_id], ct_int, radius)
batch_whs[(b, i)] = ((1.0 * w), (1.0 * h))
batch_indices[(b, i)] = ((ct_int[1] * self.output_size) + ct_int[0])
batch_regs[(b, i)] = (ct - ct_int)
batch_reg_masks[(b, i)] = 1
return [batch_images, batch_hms_2, batch_whs, batch_regs, batch_reg_masks, batch_indices] |
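The heatmap targets above are built by splatting a 2D Gaussian at each object centre; draw_gaussian, draw_gaussian_2 and the radius helpers come from the repository's utilities, so the following is only a self-contained approximation of that idea, not the repository's code.

import numpy as np

def draw_gaussian_peak(heatmap, center, radius):
    # Splat an un-normalised 2D Gaussian of the given radius onto heatmap at center,
    # keeping the element-wise maximum where peaks overlap.
    x, y = int(center[0]), int(center[1])
    diameter = 2 * radius + 1
    sigma = diameter / 6.0
    ax = np.arange(-radius, radius + 1, dtype=np.float32)
    gaussian = np.exp(-(ax[None, :] ** 2 + ax[:, None] ** 2) / (2 * sigma ** 2))

    height, width = heatmap.shape
    left, right = min(x, radius), min(width - x, radius + 1)
    top, bottom = min(y, radius), min(height - y, radius + 1)
    patch = heatmap[y - top:y + bottom, x - left:x + right]
    np.maximum(patch, gaussian[radius - top:radius + bottom,
                               radius - left:radius + right], out=patch)
    return heatmap

heatmap = np.zeros((128, 128), dtype=np.float32)   # output_size x output_size for one class
draw_gaussian_peak(heatmap, center=(40, 60), radius=5)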
def compute_targets(self, image_group, annotations_group):
'\n Compute target outputs for the network using images and their annotations.\n '
return np.zeros((len(image_group),)) | -4,948,169,037,549,393,000 | Compute target outputs for the network using images and their annotations. | generators/common.py | compute_targets | lbcsept/keras-CenterNet | python | def compute_targets(self, image_group, annotations_group):
'\n \n '
return np.zeros((len(image_group),)) |
def compute_inputs_targets(self, group):
'\n Compute inputs and target outputs for the network.\n '
image_group = self.load_image_group(group)
annotations_group = self.load_annotations_group(group)
(image_group, annotations_group) = self.filter_annotations(image_group, annotations_group, group)
(image_group, annotations_group) = self.random_visual_effect_group(image_group, annotations_group)
(image_group, annotations_group) = self.random_misc_group(image_group, annotations_group)
if (len(image_group) == 0):
return (None, None)
inputs = self.compute_inputs(image_group, annotations_group)
targets = self.compute_targets(image_group, annotations_group)
return (inputs, targets) | 3,504,434,119,494,047,000 | Compute inputs and target outputs for the network. | generators/common.py | compute_inputs_targets | lbcsept/keras-CenterNet | python | def compute_inputs_targets(self, group):
'\n \n '
image_group = self.load_image_group(group)
annotations_group = self.load_annotations_group(group)
(image_group, annotations_group) = self.filter_annotations(image_group, annotations_group, group)
(image_group, annotations_group) = self.random_visual_effect_group(image_group, annotations_group)
(image_group, annotations_group) = self.random_misc_group(image_group, annotations_group)
if (len(image_group) == 0):
return (None, None)
inputs = self.compute_inputs(image_group, annotations_group)
targets = self.compute_targets(image_group, annotations_group)
return (inputs, targets) |
def __len__(self):
'\n Number of batches for generator.\n '
return len(self.groups) | 4,036,216,262,415,912,000 | Number of batches for generator. | generators/common.py | __len__ | lbcsept/keras-CenterNet | python | def __len__(self):
'\n \n '
return len(self.groups) |
def __getitem__(self, index):
'\n Keras sequence method for generating batches.\n '
group = self.groups[self.current_index]
if self.multi_scale:
if ((self.current_index % 10) == 0):
random_size_index = np.random.randint(0, len(self.multi_image_sizes))
self.image_size = self.multi_image_sizes[random_size_index]
(inputs, targets) = self.compute_inputs_targets(group)
while (inputs is None):
current_index = (self.current_index + 1)
if (current_index >= len(self.groups)):
current_index = (current_index % len(self.groups))
self.current_index = current_index
group = self.groups[self.current_index]
(inputs, targets) = self.compute_inputs_targets(group)
current_index = (self.current_index + 1)
if (current_index >= len(self.groups)):
current_index = (current_index % len(self.groups))
self.current_index = current_index
return (inputs, targets) | 2,202,590,093,009,340,400 | Keras sequence method for generating batches. | generators/common.py | __getitem__ | lbcsept/keras-CenterNet | python | def __getitem__(self, index):
'\n \n '
group = self.groups[self.current_index]
if self.multi_scale:
if ((self.current_index % 10) == 0):
random_size_index = np.random.randint(0, len(self.multi_image_sizes))
self.image_size = self.multi_image_sizes[random_size_index]
(inputs, targets) = self.compute_inputs_targets(group)
while (inputs is None):
current_index = (self.current_index + 1)
if (current_index >= len(self.groups)):
current_index = (current_index % len(self.groups))
self.current_index = current_index
group = self.groups[self.current_index]
(inputs, targets) = self.compute_inputs_targets(group)
current_index = (self.current_index + 1)
if (current_index >= len(self.groups)):
current_index = (current_index % len(self.groups))
self.current_index = current_index
return (inputs, targets) |
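A hypothetical iteration sketch; CocoGenerator stands in for a concrete subclass that implements the abstract methods (size, num_classes, load_image, load_annotations, ...), and the constructor arguments are illustrative.

train_generator = CocoGenerator(batch_size=2, input_size=512)   # hypothetical concrete subclass

inputs, targets = train_generator[0]
batch_images, batch_hms, batch_whs, batch_regs, batch_reg_masks, batch_indices = inputs
print(batch_images.shape)   # (2, 512, 512, 3)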
def test_fas():
'\n Testing based upon the work provided in\n https://github.com/arkottke/notebooks/blob/master/effective_amp_spectrum.ipynb\n '
ddir = os.path.join('data', 'testdata')
datadir = pkg_resources.resource_filename('gmprocess', ddir)
fas_file = os.path.join(datadir, 'fas_greater_of_two_horizontals.pkl')
p1 = os.path.join(datadir, 'peer', 'RSN763_LOMAP_GIL067.AT2')
p2 = os.path.join(datadir, 'peer', 'RSN763_LOMAP_GIL337.AT2')
stream = StationStream([])
for (idx, fpath) in enumerate([p1, p2]):
with open(fpath, encoding='utf-8') as file_obj:
for _ in range(3):
next(file_obj)
meta = re.findall('[.0-9]+', next(file_obj))
dt = float(meta[1])
accels = np.array([col for line in file_obj for col in line.split()])
trace = StationTrace(data=accels, header={'channel': ('H' + str(idx)), 'delta': dt, 'units': 'acc', 'standard': {'corner_frequency': np.nan, 'station_name': '', 'source': 'json', 'instrument': '', 'instrument_period': np.nan, 'source_format': 'json', 'comments': '', 'structure_type': '', 'sensor_serial_number': '', 'source_file': '', 'process_level': 'raw counts', 'process_time': '', 'horizontal_orientation': np.nan, 'vertical_orientation': np.nan, 'units': 'acc', 'units_type': 'acc', 'instrument_sensitivity': np.nan, 'instrument_damping': np.nan}})
stream.append(trace)
for tr in stream:
response = {'input_units': 'counts', 'output_units': 'cm/s^2'}
tr.setProvenance('remove_response', response)
target_df = pd.read_pickle(fas_file)
ind_vals = target_df.index.values
per = np.unique([float(i[0].split(')')[0].split('(')[1]) for i in ind_vals])
freqs = (1 / per)
imts = [('fas' + str(p)) for p in per]
summary = StationSummary.from_stream(stream, ['greater_of_two_horizontals'], imts, bandwidth=30)
pgms = summary.pgms
for (idx, f) in enumerate(freqs):
fstr = ('FAS(%.3f)' % (1 / f))
fval1 = pgms.loc[(fstr, 'GREATER_OF_TWO_HORIZONTALS')].Result
fval2 = target_df.loc[(fstr, 'GREATER_OF_TWO_HORIZONTALS')].Result
np.testing.assert_allclose(fval1, fval2, rtol=1e-05, atol=1e-05) | 5,979,378,271,937,405,000 | Testing based upon the work provided in
https://github.com/arkottke/notebooks/blob/master/effective_amp_spectrum.ipynb | tests/gmprocess/metrics/imt/fas_greater_of_two_test.py | test_fas | jrekoske-usgs/groundmotion-processing | python | def test_fas():
'\n Testing based upon the work provided in\n https://github.com/arkottke/notebooks/blob/master/effective_amp_spectrum.ipynb\n '
ddir = os.path.join('data', 'testdata')
datadir = pkg_resources.resource_filename('gmprocess', ddir)
fas_file = os.path.join(datadir, 'fas_greater_of_two_horizontals.pkl')
p1 = os.path.join(datadir, 'peer', 'RSN763_LOMAP_GIL067.AT2')
p2 = os.path.join(datadir, 'peer', 'RSN763_LOMAP_GIL337.AT2')
stream = StationStream([])
for (idx, fpath) in enumerate([p1, p2]):
with open(fpath, encoding='utf-8') as file_obj:
for _ in range(3):
next(file_obj)
meta = re.findall('[.0-9]+', next(file_obj))
dt = float(meta[1])
accels = np.array([col for line in file_obj for col in line.split()])
trace = StationTrace(data=accels, header={'channel': ('H' + str(idx)), 'delta': dt, 'units': 'acc', 'standard': {'corner_frequency': np.nan, 'station_name': '', 'source': 'json', 'instrument': '', 'instrument_period': np.nan, 'source_format': 'json', 'comments': '', 'structure_type': '', 'sensor_serial_number': '', 'source_file': '', 'process_level': 'raw counts', 'process_time': '', 'horizontal_orientation': np.nan, 'vertical_orientation': np.nan, 'units': 'acc', 'units_type': 'acc', 'instrument_sensitivity': np.nan, 'instrument_damping': np.nan}})
stream.append(trace)
for tr in stream:
response = {'input_units': 'counts', 'output_units': 'cm/s^2'}
tr.setProvenance('remove_response', response)
target_df = pd.read_pickle(fas_file)
ind_vals = target_df.index.values
per = np.unique([float(i[0].split(')')[0].split('(')[1]) for i in ind_vals])
freqs = (1 / per)
imts = [('fas' + str(p)) for p in per]
summary = StationSummary.from_stream(stream, ['greater_of_two_horizontals'], imts, bandwidth=30)
pgms = summary.pgms
for (idx, f) in enumerate(freqs):
fstr = ('FAS(%.3f)' % (1 / f))
fval1 = pgms.loc[(fstr, 'GREATER_OF_TWO_HORIZONTALS')].Result
fval2 = target_df.loc[(fstr, 'GREATER_OF_TWO_HORIZONTALS')].Result
np.testing.assert_allclose(fval1, fval2, rtol=1e-05, atol=1e-05) |
def transcribe_audio(project_slug, creds, overwrite=False):
'\n project_slug: ./projects/audiostreams/filename.wav\n '
watson_jobs = []
audio_fn = (project_slug + '.wav')
print(('audio_filename:' + audio_fn))
time_slug = make_slug_from_path(audio_fn)
transcript_fn = (join(transcripts_dir(project_slug), time_slug) + '.json')
print(('transcript_fn' + transcript_fn))
if (not exists(transcript_fn)):
print('Sending to Watson API:\n\t', audio_fn)
job = Process(target=process_transcript_call, args=(audio_fn, transcript_fn, creds))
job.start()
watson_jobs.append(job)
for job in watson_jobs:
job.join()
return transcript_fn | 1,367,059,603,446,845,000 | project_slug: ./projects/audiostreams/filename.wav | watsoncloud/foo/high.py | transcribe_audio | audip/youtubeseek | python | def transcribe_audio(project_slug, creds, overwrite=False):
'\n \n '
watson_jobs = []
audio_fn = (project_slug + '.wav')
print(('audio_filename:' + audio_fn))
time_slug = make_slug_from_path(audio_fn)
transcript_fn = (join(transcripts_dir(project_slug), time_slug) + '.json')
print(('transcript_fn' + transcript_fn))
if (not exists(transcript_fn)):
print('Sending to Watson API:\n\t', audio_fn)
job = Process(target=process_transcript_call, args=(audio_fn, transcript_fn, creds))
job.start()
watson_jobs.append(job)
for job in watson_jobs:
job.join()
return transcript_fn |
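transcribe_audio above fans each transcription request out to a separate multiprocessing.Process and then joins them all before returning. A minimal sketch of that start/join pattern, with do_work as a hypothetical stand-in for process_transcript_call:

from multiprocessing import Process

def do_work(task_id):
    # Stand-in worker; the real code sends the audio file to the Watson API here.
    print('processing task', task_id)

if __name__ == '__main__':
    jobs = []
    for task_id in range(3):
        job = Process(target=do_work, args=(task_id,))
        job.start()      # launch the worker without blocking
        jobs.append(job)
    for job in jobs:
        job.join()       # wait for every worker to finish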
@abstractmethod
def _plot_init(self):
'Setup MPL figure display with empty data.'
pass | -3,701,202,470,411,182,000 | Setup MPL figure display with empty data. | src/pymor/discretizers/builtin/gui/matplotlib.py | _plot_init | TreeerT/pymor | python | @abstractmethod
def _plot_init(self):
pass |