repo_name (stringlengths 6-130) | hexsha (list) | file_path (list) | code (list) | apis (list)
---|---|---|---|---|
tijsmaas/Graph-WaveNet
|
[
"353ea28687b425c584039278b5705e2a3651094d"
] |
[
"model_static.py"
] |
[
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport sys\n\n\n# RNN model for every sensor\nclass StaticNet(nn.Module):\n def __init__(self, device, num_nodes, dropout=0.3, supports=None, gcn_bool=True, addaptadj=True, aptinit=None, in_dim=2,out_dim=12,residual_channels=32,dilation_channels=32,skip_channels=256,end_channels=512,kernel_size=2,blocks=4,layers=2):\n super(StaticNet, self).__init__()\n self.dropout = dropout\n self.blocks = blocks\n self.layers = layers\n self.gcn_bool = gcn_bool\n self.addaptadj = addaptadj\n\n self.hidden = 32\n self.hidden_fc = 100\n self.receptive_field = 1\n\n self.b = torch.zeros(1, requires_grad=True).to(device)\n self.conv1 = nn.Conv2d(1, 1, kernel_size=1).to(device)\n\n self.supports = supports\n\n self.timeframes = out_dim + self.receptive_field\n\n\n# input: [batch, vals, sensors, measurements]\n def forward(self, input):\n batch = input.size(0) # 64\n vals = input.size(1) # 2\n sensors = input.size(2) #207\n in_len = input.size(3)\n x = input\n\n # Take most recent measurement values\n all_sensors_input = x[:,0,:,-1]\n # Copy values of last timestep to all predictions [64, 207] -> [64, 1, 207, 12]\n static_val = all_sensors_input.view(batch, sensors, 1, 1)\n\n y = torch.arange(batch * self.timeframes * sensors).view(batch, sensors, self.timeframes, 1).cuda()\n a, b = torch.broadcast_tensors(static_val, y)\n\n # output.fill_(static_val * self.b)\n # [64, 5, 13, 1]\n output = a.transpose(1,2) + self.b\n # [64, 13, 5, 1]\n\n output = output[:, 1:, :, :]\n return output\n\n\n\n @classmethod\n def from_args(cls, args, device, supports, aptinit, **kwargs):\n defaults = dict(dropout=args.dropout, supports=supports,\n addaptadj=args.addaptadj, aptinit=aptinit,\n in_dim=args.in_dim, out_dim=args.seq_length,\n residual_channels=args.nhid, dilation_channels=args.nhid)\n defaults.update(**kwargs)\n model = cls(device, args.num_nodes, **defaults)\n return model\n\n"
] |
[
[
"torch.broadcast_tensors",
"torch.nn.Conv2d",
"torch.arange",
"torch.zeros"
]
] |
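
For orientation, a minimal, hypothetical sketch (not part of the dataset row) of how the torch APIs listed above combine in `StaticNet.forward`: the model simply copies each sensor's most recent observation across the 12-step prediction horizon and adds a learnable scalar. Shapes follow the comments in `model_static.py`; the `nn.Conv2d` created in `__init__` is not used in the forward pass, so it is omitted here.

```python
import torch

# Assumed shapes, taken from the comments in model_static.py above:
# 64 samples, 207 sensors, out_dim=12 plus a receptive field of 1 -> 13 timeframes.
batch, sensors, timeframes = 64, 207, 13

# Last observed value per sensor, reshaped so it can broadcast over the horizon.
last_obs = torch.randn(batch, sensors).view(batch, sensors, 1, 1)

# torch.arange builds a tensor of the target shape; torch.broadcast_tensors then
# expands last_obs to match it, i.e. the last observation is repeated per timeframe.
template = torch.arange(batch * sensors * timeframes).view(batch, sensors, timeframes, 1)
expanded, _ = torch.broadcast_tensors(last_obs, template)

# torch.zeros with requires_grad=True plays the role of the model's learnable bias self.b.
b = torch.zeros(1, requires_grad=True)

out = expanded.transpose(1, 2) + b   # [batch, timeframes, sensors, 1]
out = out[:, 1:, :, :]               # drop the receptive-field step
print(out.shape)                     # torch.Size([64, 12, 207, 1])
```
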
tzemicheal/cuml
|
[
"377f3c2773e3fc64d93d3d64a3f2fcd6c8759044",
"377f3c2773e3fc64d93d3d64a3f2fcd6c8759044"
] |
[
"python/cuml/test/experimental/test_explainer_permutation_shap.py",
"python/cuml/_thirdparty/sklearn/preprocessing/_data.py"
] |
[
"#\n# Copyright (c) 2020, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nimport cuml\nimport cuml.experimental.explainer\nimport cupy as cp\nimport numpy as np\nimport pytest\nimport sklearn.neighbors\n\nfrom cuml.test.utils import ClassEnumerator\nfrom sklearn.datasets import make_classification\nfrom sklearn.datasets import make_regression\nfrom sklearn.model_selection import train_test_split\n\nmodels_config = ClassEnumerator(module=cuml)\nmodels = models_config.get_models()\n\n\[email protected](scope=\"module\")\ndef exact_tests_dataset():\n X, y = make_regression(n_samples=101,\n n_features=11,\n noise=0.1,\n random_state=42)\n\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=1, random_state=42)\n\n X_train = X_train.astype(np.float32)\n X_test = X_test.astype(np.float32)\n y_train = y_train.astype(np.float32)\n y_test = y_test.astype(np.float32)\n return X_train, X_test, y_train, y_test\n\n\n###############################################################################\n# End to end tests #\n###############################################################################\n\n\[email protected](\"model\", [cuml.LinearRegression,\n cuml.KNeighborsRegressor,\n cuml.SVR])\ndef test_regression_datasets(exact_tests_dataset, model):\n # in general permutation shap does not behave as predictable as\n # kernel shap, even when comparing permutation against kernel SHAP of the\n # mainline SHAP package. 
So these tests assure us that we're doing the\n # correct calculations, even if we can't compare directly.\n X_train, X_test, y_train, y_test = exact_tests_dataset\n\n mod = model().fit(X_train, y_train)\n\n explainer = cuml.experimental.explainer.PermutationExplainer(\n model=mod.predict,\n masker=X_train)\n\n cu_shap_values = explainer.shap_values(X_test)\n\n exp_v = float(explainer.expected_value)\n fx = mod.predict(X_test)\n assert (np.sum(cp.asnumpy(cu_shap_values)) - abs(fx - exp_v)) <= 1e-5\n\n skmod = cuml_skl_class_dict[model]().fit(X_train, y_train)\n\n explainer = cuml.experimental.explainer.KernelExplainer(\n model=skmod.predict,\n data=X_train)\n\n skl_shap_values = explainer.shap_values(X_test)\n exp_v = float(explainer.expected_value)\n fx = mod.predict(X_test)\n assert (np.sum(cp.asnumpy(skl_shap_values)) - abs(fx - exp_v)) <= 1e-5\n\n\ndef test_exact_classification_datasets():\n X, y = make_classification(n_samples=101,\n n_features=11,\n random_state=42,\n n_informative=2,\n n_classes=2)\n\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=1, random_state=42)\n\n X_train = X_train.astype(np.float32)\n X_test = X_test.astype(np.float32)\n y_train = y_train.astype(np.float32)\n y_test = y_test.astype(np.float32)\n\n mod = cuml.SVC(probability=True).fit(X_train, y_train)\n\n explainer = cuml.experimental.explainer.PermutationExplainer(\n model=mod.predict_proba,\n masker=X_train)\n\n cu_shap_values = explainer.shap_values(X_test)\n\n exp_v = explainer.expected_value\n fx = mod.predict_proba(X_test)[0]\n assert (np.sum(cp.asnumpy(\n cu_shap_values[0])) - abs(fx[0] - exp_v[0])) <= 1e-5\n assert (np.sum(cp.asnumpy(\n cu_shap_values[1])) - abs(fx[1] - exp_v[1])) <= 1e-5\n\n mod = sklearn.svm.SVC(probability=True).fit(X_train, y_train)\n\n explainer = cuml.experimental.explainer.PermutationExplainer(\n model=mod.predict_proba,\n masker=X_train)\n\n skl_shap_values = explainer.shap_values(X_test)\n\n exp_v = explainer.expected_value\n fx = mod.predict_proba(X_test)[0]\n assert (np.sum(cp.asnumpy(\n skl_shap_values[0])) - abs(fx[0] - exp_v[0])) <= 1e-5\n assert (np.sum(cp.asnumpy(\n skl_shap_values[1])) - abs(fx[1] - exp_v[1])) <= 1e-5\n\n\[email protected](\"dtype\", [np.float32, np.float64])\[email protected](\"nfeatures\", [11, 50])\[email protected](\"nbackground\", [10, 50])\[email protected](\"model\", [cuml.LinearRegression,\n cuml.SVR])\[email protected](\"npermutations\", [5, 50])\ndef test_different_parameters(dtype, nfeatures, nbackground, model,\n npermutations):\n X, y = cuml.datasets.make_regression(n_samples=nbackground + 5,\n n_features=nfeatures,\n noise=0.1)\n\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=5, random_state=42)\n\n X_train = X_train.astype(dtype)\n X_test = X_test.astype(dtype)\n y_train = y_train.astype(dtype)\n y_test = y_test.astype(dtype)\n\n mod = model().fit(X_train, y_train)\n\n cu_explainer = \\\n cuml.experimental.explainer.PermutationExplainer(model=mod.predict,\n masker=X_train,\n is_gpu_model=True)\n\n cu_shap_values = cu_explainer.shap_values(X_test,\n npermutations=npermutations)\n\n exp_v = float(cu_explainer.expected_value)\n fx = mod.predict(X_test)\n for i in range(5):\n assert 0.99 <= (abs(np.sum(cp.asnumpy(\n cu_shap_values[i]))) / abs(fx[i] - exp_v)) <= 1.01\n\n\n###############################################################################\n# Functional tests #\n###############################################################################\n\ndef 
test_not_shuffled_explanation(exact_tests_dataset):\n # in general permutation shap does not behave as predictable as\n # kernel shap, even when comparing permutation against kernel SHAP of the\n # mainline SHAP package. So these tests assure us that we're doing the\n # correct calculations, even if we can't compare directly.\n X_train, X_test, y_train, y_test = exact_tests_dataset\n\n mod = cuml.LinearRegression().fit(X_train, y_train)\n\n explainer = cuml.experimental.explainer.PermutationExplainer(\n model=mod.predict,\n masker=X_train)\n\n shap_values = explainer._explain(\n X_test,\n npermutations=1,\n main_effects=False,\n testing=True\n )\n\n assert np.allclose(shap_values, not_shuffled_shap_values,\n rtol=1e-04, atol=1e-04)\n\n\n# Test against exact shap values for linear regression\n# 1 permutation should give exact result\ndef test_permutation(exact_tests_dataset):\n X_train, X_test, y_train, y_test = exact_tests_dataset\n # Train arbitrary model to get some coefficients\n mod = cuml.LinearRegression().fit(X_train, y_train)\n # Single background and foreground instance\n # Gives zero effect to features when they are 'off'\n # and the effect of the regression coefficient when they are 'on'\n X_background = np.zeros((1, X_train.shape[1]))\n X_foreground = np.ones((1, X_train.shape[1]))\n explainer = cuml.experimental.explainer.PermutationExplainer(\n model=mod.predict,\n masker=X_background)\n\n shap_values = explainer._explain(\n X_foreground,\n npermutations=5,\n main_effects=False\n )\n\n assert np.allclose(mod.coef_, shap_values, rtol=1e-04, atol=1e-04)\n\n\n###############################################################################\n# Precomputed results #\n# and testing variables #\n###############################################################################\n\ncuml_skl_class_dict = {\n cuml.LinearRegression: sklearn.linear_model.LinearRegression,\n cuml.KNeighborsRegressor: sklearn.neighbors.KNeighborsRegressor,\n cuml.SVR: sklearn.svm.SVR\n}\n\n# values were precomputed with python code and with a modified version\n# of SHAP's permutationExplainer that did not shuffle the indexes for the\n# permutations, giving us a test of the calculations in our implementation\nnot_shuffled_shap_values = [\n -3.60017776e-01, -1.02140656e+02, 1.29915714e+00, -6.30791473e+01,\n 2.47955322e-04, -2.31356430e+00, -1.01764305e+02, 3.39929199e+00,\n 4.10347061e+01, 7.13340759e+01, -1.60478973e+00\n]\n",
"# Original authors from Sckit-Learn:\n# Alexandre Gramfort <[email protected]>\n# Mathieu Blondel <[email protected]>\n# Olivier Grisel <[email protected]>\n# Andreas Mueller <[email protected]>\n# Eric Martin <[email protected]>\n# Giorgio Patrini <[email protected]>\n# Eric Chang <[email protected]>\n# License: BSD 3 clause\n\n\n# This code originates from the Scikit-Learn library,\n# it was since modified to allow GPU acceleration.\n# This code is under BSD 3 clause license.\n# Authors mentioned above do not endorse or promote this production.\n\n\nfrom itertools import chain, combinations\nimport numbers\nimport warnings\nfrom itertools import combinations_with_replacement as combinations_w_r\n\nimport cupy as np\nfrom cupy import sparse\nfrom scipy import stats\nfrom scipy import optimize\nfrom scipy.special import boxcox\n\nfrom ..utils.skl_dependencies import BaseEstimator, TransformerMixin\nfrom ....thirdparty_adapters import check_array, get_input_type, \\\n to_output_type\nfrom ..utils.extmath import row_norms\nfrom ..utils.extmath import _incremental_mean_and_var\nfrom ..utils.validation import (check_is_fitted, check_random_state,\n FLOAT_DTYPES, _deprecate_positional_args)\n\nfrom ..utils.sparsefuncs import (inplace_column_scale,\n min_max_axis,\n mean_variance_axis)\n\nfrom ....thirdparty_adapters.sparsefuncs_fast import \\\n (inplace_csr_row_normalize_l1, inplace_csr_row_normalize_l2,\n csr_polynomial_expansion)\nfrom ....common.import_utils import check_cupy8\n\n\nBOUNDS_THRESHOLD = 1e-7\n\n__all__ = [\n 'Binarizer',\n 'KernelCenterer',\n 'MinMaxScaler',\n 'MaxAbsScaler',\n 'Normalizer',\n 'RobustScaler',\n 'StandardScaler',\n 'QuantileTransformer',\n 'PowerTransformer',\n 'add_dummy_feature',\n 'binarize',\n 'normalize',\n 'scale',\n 'robust_scale',\n 'maxabs_scale',\n 'minmax_scale',\n 'quantile_transform',\n 'power_transform',\n]\n\n\ndef _handle_zeros_in_scale(scale, copy=True):\n ''' Makes sure that whenever scale is zero, we handle it correctly.\n\n This happens in most scalers when we have constant features.'''\n\n # if we are fitting on 1D arrays, scale might be a scalar\n if np.isscalar(scale):\n if scale == .0:\n scale = 1.\n return scale\n elif isinstance(scale, np.ndarray):\n if copy:\n # New array to avoid side-effects\n scale = scale.copy()\n scale[scale == 0.0] = 1.0\n return scale\n\n\n@_deprecate_positional_args\ndef scale(X, *, axis=0, with_mean=True, with_std=True, copy=True):\n \"\"\"Standardize a dataset along any axis\n\n Center to the mean and component wise scale to unit variance.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}\n The data to center and scale.\n\n axis : int (0 by default)\n axis used to compute the means and standard deviations along. If 0,\n independently standardize each feature, otherwise (if 1) standardize\n each sample.\n\n with_mean : boolean, True by default\n If True, center the data before scaling.\n\n with_std : boolean, True by default\n If True, scale the data to unit variance (or equivalently,\n unit standard deviation).\n\n copy : boolean, optional, default True\n Whether a forced copy will be triggered. 
If copy=False, a copy might\n be triggered by a conversion.\n\n Notes\n -----\n This implementation will refuse to center sparse matrices\n since it would make them non-sparse and would potentially crash the\n program with memory exhaustion problems.\n\n Instead the caller is expected to either set explicitly\n `with_mean=False` (in that case, only variance scaling will be\n performed on the features of the sparse matrix) or to densify the matrix\n if he/she expects the materialized dense array to fit in memory.\n\n For optimal processing the caller should pass a CSC matrix.\n\n NaNs are treated as missing values: disregarded to compute the statistics,\n and maintained during the data transformation.\n\n We use a biased estimator for the standard deviation, equivalent to\n `numpy.std(x, ddof=0)`. Note that the choice of `ddof` is unlikely to\n affect model performance.\n\n See also\n --------\n StandardScaler: Performs scaling to unit variance using the``Transformer`` API\n\n \"\"\" # noqa\n output_type = get_input_type(X)\n X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy,\n ensure_2d=False, estimator='the scale function',\n dtype=FLOAT_DTYPES, force_all_finite='allow-nan')\n\n if sparse.issparse(X):\n if with_mean:\n raise ValueError(\n \"Cannot center sparse matrices: pass `with_mean=False` instead\"\n \" See docstring for motivation and alternatives.\")\n if axis != 0:\n raise ValueError(\"Can only scale sparse matrix on axis=0, \"\n \" got axis=%d\" % axis)\n if with_std:\n _, var = mean_variance_axis(X, axis=0)\n var = _handle_zeros_in_scale(var, copy=False)\n inplace_column_scale(X, 1 / np.sqrt(var))\n else:\n X = np.asarray(X)\n if with_mean:\n mean_ = np.nanmean(X, axis)\n if with_std:\n scale_ = np.nanstd(X, axis)\n # Xr is a view on the original array that enables easy use of\n # broadcasting on the axis in which we are interested in\n Xr = np.rollaxis(X, axis)\n if with_mean:\n Xr -= mean_\n mean_1 = np.nanmean(Xr, axis=0)\n # Verify that mean_1 is 'close to zero'. If X contains very\n # large values, mean_1 can also be very large, due to a lack of\n # precision of mean_. In this case, a pre-scaling of the\n # concerned feature is efficient, for instance by its mean or\n # maximum.\n if not np.allclose(mean_1, 0):\n warnings.warn(\"Numerical issues were encountered \"\n \"when centering the data \"\n \"and might not be solved. Dataset may \"\n \"contain too large values. You may need \"\n \"to prescale your features.\")\n Xr -= mean_1\n if with_std:\n scale_ = _handle_zeros_in_scale(scale_, copy=False)\n Xr /= scale_\n if with_mean:\n mean_2 = np.nanmean(Xr, axis=0)\n # If mean_2 is not 'close to zero', it comes from the fact that\n # scale_ is very small so that mean_2 = mean_1/scale_ > 0, even\n # if mean_1 was close to zero. The problem is thus essentially\n # due to the lack of precision of mean_. A solution is then to\n # subtract the mean again:\n if not np.allclose(mean_2, 0):\n warnings.warn(\"Numerical issues were encountered \"\n \"when scaling the data \"\n \"and might not be solved. The standard \"\n \"deviation of the data is probably \"\n \"very close to 0. \")\n Xr -= mean_2\n\n X = to_output_type(X, output_type)\n return X\n\n\nclass MinMaxScaler(TransformerMixin, BaseEstimator):\n \"\"\"Transform features by scaling each feature to a given range.\n\n This estimator scales and translates each feature individually such\n that it is in the given range on the training set, e.g. 
between\n zero and one.\n\n The transformation is given by::\n\n X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))\n X_scaled = X_std * (max - min) + min\n\n where min, max = feature_range.\n\n This transformation is often used as an alternative to zero mean,\n unit variance scaling.\n\n Parameters\n ----------\n feature_range : tuple (min, max), default=(0, 1)\n Desired range of transformed data.\n\n copy : bool, default=True\n Whether a forced copy will be triggered. If copy=False, a copy might\n be triggered by a conversion.\n\n Attributes\n ----------\n min_ : ndarray of shape (n_features,)\n Per feature adjustment for minimum. Equivalent to\n ``min - X.min(axis=0) * self.scale_``\n\n scale_ : ndarray of shape (n_features,)\n Per feature relative scaling of the data. Equivalent to\n ``(max - min) / (X.max(axis=0) - X.min(axis=0))``\n\n data_min_ : ndarray of shape (n_features,)\n Per feature minimum seen in the data\n\n data_max_ : ndarray of shape (n_features,)\n Per feature maximum seen in the data\n\n data_range_ : ndarray of shape (n_features,)\n Per feature range ``(data_max_ - data_min_)`` seen in the data\n\n n_samples_seen_ : int\n The number of samples processed by the estimator.\n It will be reset on new calls to fit, but increments across\n ``partial_fit`` calls.\n\n Examples\n --------\n >>> from cuml.preprocessing import MinMaxScaler\n >>> data = [[-1, 2], [-0.5, 6], [0, 10], [1, 18]]\n >>> scaler = MinMaxScaler()\n >>> print(scaler.fit(data))\n MinMaxScaler()\n >>> print(scaler.data_max_)\n [ 1. 18.]\n >>> print(scaler.transform(data))\n [[0. 0. ]\n [0.25 0.25]\n [0.5 0.5 ]\n [1. 1. ]]\n >>> print(scaler.transform([[2, 2]]))\n [[1.5 0. ]]\n\n See also\n --------\n minmax_scale: Equivalent function without the estimator API.\n\n Notes\n -----\n NaNs are treated as missing values: disregarded in fit, and maintained in\n transform.\n \"\"\"\n\n @_deprecate_positional_args\n def __init__(self, feature_range=(0, 1), *, copy=True):\n self.feature_range = feature_range\n self.copy = copy\n\n def _reset(self):\n \"\"\"Reset internal data-dependent state of the scaler, if necessary.\n\n __init__ parameters are not touched.\n \"\"\"\n\n # Checking one attribute is enough, becase they are all set together\n # in partial_fit\n if hasattr(self, 'scale_'):\n del self.scale_\n del self.min_\n del self.n_samples_seen_\n del self.data_min_\n del self.data_max_\n del self.data_range_\n\n def fit(self, X, y=None):\n \"\"\"Compute the minimum and maximum to be used for later scaling.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The data used to compute the per-feature minimum and maximum\n used for later scaling along the features axis.\n\n y : None\n Ignored.\n\n Returns\n -------\n self : object\n Fitted scaler.\n \"\"\"\n\n # Reset internal state before fitting\n self._reset()\n return self.partial_fit(X, y)\n\n def partial_fit(self, X, y=None):\n \"\"\"Online computation of min and max on X for later scaling.\n\n All of X is processed as a single batch. 
This is intended for cases\n when :meth:`fit` is not feasible due to very large number of\n `n_samples` or because X is read from a continuous stream.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The data used to compute the mean and standard deviation\n used for later scaling along the features axis.\n\n y : None\n Ignored.\n\n Returns\n -------\n self : object\n Transformer instance.\n \"\"\"\n feature_range = self.feature_range\n if feature_range[0] >= feature_range[1]:\n raise ValueError(\"Minimum of desired feature range must be smaller\"\n \" than maximum. Got %s.\" % str(feature_range))\n\n first_pass = not hasattr(self, 'n_samples_seen_')\n X = self._validate_data(X, reset=first_pass,\n estimator=self, dtype=FLOAT_DTYPES,\n force_all_finite=\"allow-nan\")\n\n data_min = np.nanmin(X, axis=0)\n data_max = np.nanmax(X, axis=0)\n\n if first_pass:\n self.n_samples_seen_ = X.shape[0]\n else:\n data_min = np.minimum(self.data_min_, data_min)\n data_max = np.maximum(self.data_max_, data_max)\n self.n_samples_seen_ += X.shape[0]\n\n data_range = data_max - data_min\n self.scale_ = ((feature_range[1] - feature_range[0]) /\n _handle_zeros_in_scale(data_range))\n self.min_ = feature_range[0] - data_min * self.scale_\n self.data_min_ = data_min\n self.data_max_ = data_max\n self.data_range_ = data_range\n return self\n\n def transform(self, X):\n \"\"\"Scale features of X according to feature_range.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Input data that will be transformed.\n\n Returns\n -------\n Xt : array-like of shape (n_samples, n_features)\n Transformed data.\n \"\"\"\n check_is_fitted(self)\n\n output_type = get_input_type(X)\n X = check_array(X, copy=self.copy, dtype=FLOAT_DTYPES,\n force_all_finite=\"allow-nan\")\n\n X *= self.scale_\n X += self.min_\n\n X = to_output_type(X, output_type)\n return X\n\n def inverse_transform(self, X):\n \"\"\"Undo the scaling of X according to feature_range.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Input data that will be transformed. It cannot be sparse.\n\n Returns\n -------\n Xt : array-like of shape (n_samples, n_features)\n Transformed data.\n \"\"\"\n check_is_fitted(self)\n\n output_type = get_input_type(X)\n X = check_array(X, copy=self.copy, dtype=FLOAT_DTYPES,\n force_all_finite=\"allow-nan\")\n\n X -= self.min_\n X /= self.scale_\n\n X = to_output_type(X, output_type)\n return X\n\n def _more_tags(self):\n return {'allow_nan': True}\n\n\n@_deprecate_positional_args\ndef minmax_scale(X, feature_range=(0, 1), *, axis=0, copy=True):\n \"\"\"Transform features by scaling each feature to a given range.\n\n This estimator scales and translates each feature individually such\n that it is in the given range on the training set, i.e. 
between\n zero and one.\n\n The transformation is given by (when ``axis=0``)::\n\n X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))\n X_scaled = X_std * (max - min) + min\n\n where min, max = feature_range.\n\n The transformation is calculated as (when ``axis=0``)::\n\n X_scaled = scale * X + min - X.min(axis=0) * scale\n where scale = (max - min) / (X.max(axis=0) - X.min(axis=0))\n\n This transformation is often used as an alternative to zero mean,\n unit variance scaling.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The data.\n\n feature_range : tuple (min, max), default=(0, 1)\n Desired range of transformed data.\n\n axis : int, default=0\n Axis used to scale along. If 0, independently scale each feature,\n otherwise (if 1) scale each sample.\n\n copy : bool, default=True\n Whether a forced copy will be triggered. If copy=False, a copy might\n be triggered by a conversion.\n\n See also\n --------\n MinMaxScaler: Performs scaling to a given range using the``Transformer`` API\n \"\"\" # noqa\n # Unlike the scaler object, this function allows 1d input.\n # If copy is required, it will be done inside the scaler object.\n\n output_type = get_input_type(X)\n X = check_array(X, copy=False, ensure_2d=False,\n dtype=FLOAT_DTYPES, force_all_finite='allow-nan')\n original_ndim = X.ndim\n\n if original_ndim == 1:\n X = X.reshape(X.shape[0], 1)\n\n s = MinMaxScaler(feature_range=feature_range, copy=copy)\n if axis == 0:\n X = s.fit_transform(X)\n else:\n X = s.fit_transform(X.T).T\n\n if original_ndim == 1:\n X = X.ravel()\n\n X = to_output_type(X, output_type)\n return X\n\n\nclass StandardScaler(TransformerMixin, BaseEstimator):\n \"\"\"Standardize features by removing the mean and scaling to unit variance\n\n The standard score of a sample `x` is calculated as:\n\n z = (x - u) / s\n\n where `u` is the mean of the training samples or zero if `with_mean=False`,\n and `s` is the standard deviation of the training samples or one if\n `with_std=False`.\n\n Centering and scaling happen independently on each feature by computing\n the relevant statistics on the samples in the training set. Mean and\n standard deviation are then stored to be used on later data using\n :meth:`transform`.\n\n Standardization of a dataset is a common requirement for many\n machine learning estimators: they might behave badly if the\n individual features do not more or less look like standard normally\n distributed data (e.g. Gaussian with 0 mean and unit variance).\n\n For instance many elements used in the objective function of\n a learning algorithm (such as the RBF kernel of Support Vector\n Machines or the L1 and L2 regularizers of linear models) assume that\n all features are centered around 0 and have variance in the same\n order. If a feature has a variance that is orders of magnitude larger\n that others, it might dominate the objective function and make the\n estimator unable to learn from other features correctly as expected.\n\n This scaler can also be applied to sparse CSR or CSC matrices by passing\n `with_mean=False` to avoid breaking the sparsity structure of the data.\n\n Parameters\n ----------\n copy : boolean, optional, default True\n Whether a forced copy will be triggered. 
If copy=False, a copy might\n be triggered by a conversion.\n\n with_mean : boolean, True by default\n If True, center the data before scaling.\n This does not work (and will raise an exception) when attempted on\n sparse matrices, because centering them entails building a dense\n matrix which in common use cases is likely to be too large to fit in\n memory.\n\n with_std : boolean, True by default\n If True, scale the data to unit variance (or equivalently,\n unit standard deviation).\n\n Attributes\n ----------\n scale_ : ndarray or None, shape (n_features,)\n Per feature relative scaling of the data. This is calculated using\n `sqrt(var_)`. Equal to ``None`` when ``with_std=False``.\n\n mean_ : ndarray or None, shape (n_features,)\n The mean value for each feature in the training set.\n Equal to ``None`` when ``with_mean=False``.\n\n var_ : ndarray or None, shape (n_features,)\n The variance for each feature in the training set. Used to compute\n `scale_`. Equal to ``None`` when ``with_std=False``.\n\n n_samples_seen_ : int or array, shape (n_features,)\n The number of samples processed by the estimator for each feature.\n If there are not missing samples, the ``n_samples_seen`` will be an\n integer, otherwise it will be an array.\n Will be reset on new calls to fit, but increments across\n ``partial_fit`` calls.\n\n Examples\n --------\n >>> from cuml.preprocessing import StandardScaler\n >>> data = [[0, 0], [0, 0], [1, 1], [1, 1]]\n >>> scaler = StandardScaler()\n >>> print(scaler.fit(data))\n StandardScaler()\n >>> print(scaler.mean_)\n [0.5 0.5]\n >>> print(scaler.transform(data))\n [[-1. -1.]\n [-1. -1.]\n [ 1. 1.]\n [ 1. 1.]]\n >>> print(scaler.transform([[2, 2]]))\n [[3. 3.]]\n\n See also\n --------\n scale: Equivalent function without the estimator API.\n\n :class:`cuml.decomposition.PCA`\n Further removes the linear correlation across features with 'whiten=True'.\n\n Notes\n -----\n NaNs are treated as missing values: disregarded in fit, and maintained in\n transform.\n\n We use a biased estimator for the standard deviation, equivalent to\n `numpy.std(x, ddof=0)`. Note that the choice of `ddof` is unlikely to\n affect model performance.\n \"\"\" # noqa\n\n @_deprecate_positional_args\n def __init__(self, *, copy=True, with_mean=True, with_std=True):\n self.with_mean = with_mean\n self.with_std = with_std\n self.copy = copy\n\n def _reset(self):\n \"\"\"Reset internal data-dependent state of the scaler, if necessary.\n\n __init__ parameters are not touched.\n \"\"\"\n\n # Checking one attribute is enough, becase they are all set together\n # in partial_fit\n if hasattr(self, 'scale_'):\n del self.scale_\n del self.n_samples_seen_\n del self.mean_\n del self.var_\n\n def fit(self, X, y=None):\n \"\"\"Compute the mean and std to be used for later scaling.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape [n_samples, n_features]\n The data used to compute the mean and standard deviation\n used for later scaling along the features axis.\n\n y\n Ignored\n \"\"\"\n\n # Reset internal state before fitting\n self._reset()\n return self.partial_fit(X, y)\n\n def partial_fit(self, X, y=None):\n \"\"\"\n Online computation of mean and std on X for later scaling.\n\n All of X is processed as a single batch. This is intended for cases\n when :meth:`fit` is not feasible due to very large number of\n `n_samples` or because X is read from a continuous stream.\n\n The algorithm for incremental mean and std is given in Equation 1.5a,b\n in Chan, Tony F., Gene H. 
Golub, and Randall J. LeVeque. \"Algorithms\n for computing the sample variance: Analysis and recommendations.\"\n The American Statistician 37.3 (1983): 242-247:\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape [n_samples, n_features]\n The data used to compute the mean and standard deviation\n used for later scaling along the features axis.\n\n y : None\n Ignored.\n\n Returns\n -------\n self : object\n Transformer instance.\n \"\"\"\n X = self._validate_data(X, accept_sparse=('csr', 'csc'),\n estimator=self, dtype=FLOAT_DTYPES,\n force_all_finite='allow-nan')\n\n # Even in the case of `with_mean=False`, we update the mean anyway\n # This is needed for the incremental computation of the var\n # See incr_mean_variance_axis and _incremental_mean_variance_axis\n\n # if n_samples_seen_ is an integer (i.e. no missing values), we need to\n # transform it to a NumPy array of shape (n_features,) required by\n # incr_mean_variance_axis and _incremental_variance_axis\n if (hasattr(self, 'n_samples_seen_') and\n isinstance(self.n_samples_seen_, numbers.Integral)):\n self.n_samples_seen_ = np.repeat(\n self.n_samples_seen_, X.shape[1]).astype(np.int64, copy=False)\n\n if sparse.issparse(X):\n if self.with_mean:\n raise ValueError(\n \"Cannot center sparse matrices: pass `with_mean=False` \"\n \"instead. See docstring for motivation and alternatives.\")\n\n if X.format == 'csr':\n X = X.tocsc()\n\n counts_nan = np.empty(X.shape[1])\n _isnan = np.isnan(X.data)\n\n start = X.indptr[0]\n for i, end in enumerate(X.indptr[1:]):\n counts_nan[i] = _isnan[start:end].sum()\n start = end\n\n if not hasattr(self, 'n_samples_seen_'):\n self.n_samples_seen_ = (\n X.shape[0] - counts_nan).astype(np.int64, copy=False)\n\n if self.with_std:\n # First pass\n if not hasattr(self, 'scale_'):\n self.mean_, self.var_ = mean_variance_axis(X, axis=0)\n\n # TODO\n \"\"\"\n # Next passes\n else:\n self.mean_, self.var_, self.n_samples_seen_ = \\\n incr_mean_variance_axis(X, axis=0,\n last_mean=self.mean_,\n last_var=self.var_,\n last_n=self.n_samples_seen_)\n \"\"\"\n else:\n self.mean_ = None\n self.var_ = None\n if hasattr(self, 'scale_'):\n self.n_samples_seen_ += X.shape[0] - counts_nan\n else:\n if not hasattr(self, 'n_samples_seen_'):\n self.n_samples_seen_ = np.zeros(X.shape[1], dtype=np.int64)\n\n # First pass\n if not hasattr(self, 'scale_'):\n self.mean_ = .0\n if self.with_std:\n self.var_ = .0\n else:\n self.var_ = None\n\n if not self.with_mean and not self.with_std:\n self.mean_ = None\n self.var_ = None\n self.n_samples_seen_ += X.shape[0] - np.isnan(X).sum(axis=0)\n else:\n self.mean_, self.var_, self.n_samples_seen_ = \\\n _incremental_mean_and_var(X, self.mean_, self.var_,\n self.n_samples_seen_)\n\n # for backward-compatibility, reduce n_samples_seen_ to an integer\n # if the number of samples is the same for each feature (i.e. no\n # missing values)\n ptp = np.amax(self.n_samples_seen_) - np.amin(self.n_samples_seen_)\n if ptp == 0:\n self.n_samples_seen_ = self.n_samples_seen_[0]\n del ptp\n\n if self.with_std:\n self.scale_ = _handle_zeros_in_scale(np.sqrt(self.var_))\n else:\n self.scale_ = None\n\n return self\n\n def transform(self, X, copy=None):\n \"\"\"Perform standardization by centering and scaling\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape [n_samples, n_features]\n The data used to scale along the features axis.\n copy : bool, optional (default: None)\n Whether a forced copy will be triggered. 
If copy=False,\n a copy might be triggered by a conversion.\n \"\"\"\n check_is_fitted(self)\n\n copy = copy if copy is not None else self.copy\n\n output_type = get_input_type(X)\n X = self._validate_data(X, reset=False,\n accept_sparse=['csr', 'csc'], copy=copy,\n estimator=self, dtype=FLOAT_DTYPES,\n force_all_finite='allow-nan')\n\n if sparse.issparse(X):\n if self.with_mean:\n raise ValueError(\n \"Cannot center sparse matrices: pass `with_mean=False` \"\n \"instead. See docstring for motivation and alternatives.\")\n if self.scale_ is not None:\n inplace_column_scale(X, 1 / self.scale_)\n else:\n if self.with_mean:\n X -= self.mean_\n if self.with_std:\n X /= self.scale_\n\n X = to_output_type(X, output_type)\n return X\n\n def inverse_transform(self, X, copy=None):\n \"\"\"Scale back the data to the original representation\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape [n_samples, n_features]\n The data used to scale along the features axis.\n copy : bool, optional (default: None)\n Whether a forced copy will be triggered. If copy=False,\n a copy might be triggered by a conversion.\n\n Returns\n -------\n X_tr : {array-like, sparse matrix}, shape [n_samples, n_features]\n Transformed array.\n \"\"\"\n check_is_fitted(self)\n\n copy = copy if copy is not None else self.copy\n\n output_type = get_input_type(X)\n X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy,\n estimator=self, dtype=FLOAT_DTYPES,\n force_all_finite='allow-nan')\n\n if sparse.issparse(X):\n if self.with_mean:\n raise ValueError(\n \"Cannot uncenter sparse matrices: pass `with_mean=False` \"\n \"instead See docstring for motivation and alternatives.\")\n if not sparse.isspmatrix_csr(X):\n X = X.tocsr()\n copy = False\n if copy:\n X = X.copy()\n if self.scale_ is not None:\n inplace_column_scale(X, self.scale_)\n else:\n X = np.asarray(X)\n if copy:\n X = X.copy()\n if self.with_std:\n X *= self.scale_\n if self.with_mean:\n X += self.mean_\n\n X = to_output_type(X, output_type)\n return X\n\n def _more_tags(self):\n return {'allow_nan': True}\n\n\nclass MaxAbsScaler(TransformerMixin, BaseEstimator):\n \"\"\"Scale each feature by its maximum absolute value.\n\n This estimator scales and translates each feature individually such\n that the maximal absolute value of each feature in the\n training set will be 1.0. It does not shift/center the data, and\n thus does not destroy any sparsity.\n\n This scaler can also be applied to sparse CSR or CSC matrices.\n\n Parameters\n ----------\n copy : boolean, optional, default is True\n Whether a forced copy will be triggered. If copy=False, a copy might\n be triggered by a conversion.\n\n Attributes\n ----------\n scale_ : ndarray, shape (n_features,)\n Per feature relative scaling of the data.\n\n max_abs_ : ndarray, shape (n_features,)\n Per feature maximum absolute value.\n\n n_samples_seen_ : int\n The number of samples processed by the estimator. Will be reset on\n new calls to fit, but increments across ``partial_fit`` calls.\n\n Examples\n --------\n >>> from cuml.preprocessing import MaxAbsScaler\n >>> X = [[ 1., -1., 2.],\n ... [ 2., 0., 0.],\n ... [ 0., 1., -1.]]\n >>> transformer = MaxAbsScaler().fit(X)\n >>> transformer\n MaxAbsScaler()\n >>> transformer.transform(X)\n array([[ 0.5, -1. , 1. ],\n [ 1. , 0. , 0. ],\n [ 0. , 1. 
, -0.5]])\n\n See also\n --------\n maxabs_scale: Equivalent function without the estimator API.\n\n Notes\n -----\n NaNs are treated as missing values: disregarded in fit, and maintained in\n transform.\n \"\"\"\n\n @check_cupy8()\n @_deprecate_positional_args\n def __init__(self, *, copy=True):\n self.copy = copy\n\n def _reset(self):\n \"\"\"Reset internal data-dependent state of the scaler, if necessary.\n\n __init__ parameters are not touched.\n \"\"\"\n\n # Checking one attribute is enough, becase they are all set together\n # in partial_fit\n if hasattr(self, 'scale_'):\n del self.scale_\n del self.n_samples_seen_\n del self.max_abs_\n\n def fit(self, X, y=None):\n \"\"\"Compute the maximum absolute value to be used for later scaling.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape [n_samples, n_features]\n The data used to compute the per-feature minimum and maximum\n used for later scaling along the features axis.\n \"\"\"\n\n # Reset internal state before fitting\n self._reset()\n return self.partial_fit(X, y)\n\n def partial_fit(self, X, y=None):\n \"\"\"\n Online computation of max absolute value of X for later scaling.\n\n All of X is processed as a single batch. This is intended for cases\n when :meth:`fit` is not feasible due to very large number of\n `n_samples` or because X is read from a continuous stream.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape [n_samples, n_features]\n The data used to compute the mean and standard deviation\n used for later scaling along the features axis.\n\n y : None\n Ignored.\n\n Returns\n -------\n self : object\n Transformer instance.\n \"\"\"\n first_pass = not hasattr(self, 'n_samples_seen_')\n X = self._validate_data(X, reset=first_pass,\n accept_sparse=('csr', 'csc'), estimator=self,\n dtype=FLOAT_DTYPES,\n force_all_finite='allow-nan')\n\n if sparse.issparse(X):\n mins, maxs = min_max_axis(X, axis=0, ignore_nan=True)\n max_abs = np.maximum(np.abs(mins), np.abs(maxs))\n else:\n max_abs = np.nanmax(np.abs(X), axis=0)\n\n if first_pass:\n self.n_samples_seen_ = X.shape[0]\n else:\n max_abs = np.maximum(self.max_abs_, max_abs)\n self.n_samples_seen_ += X.shape[0]\n\n self.max_abs_ = max_abs\n self.scale_ = _handle_zeros_in_scale(max_abs)\n return self\n\n def transform(self, X):\n \"\"\"Scale the data\n\n Parameters\n ----------\n X : {array-like, sparse matrix}\n The data that should be scaled.\n \"\"\"\n check_is_fitted(self)\n\n output_type = get_input_type(X)\n X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,\n estimator=self, dtype=FLOAT_DTYPES,\n force_all_finite='allow-nan')\n\n if sparse.issparse(X):\n inplace_column_scale(X, 1.0 / self.scale_)\n else:\n X /= self.scale_\n\n X = to_output_type(X, output_type)\n return X\n\n def inverse_transform(self, X):\n \"\"\"Scale back the data to the original representation\n\n Parameters\n ----------\n X : {array-like, sparse matrix}\n The data that should be transformed back.\n \"\"\"\n check_is_fitted(self)\n\n output_type = get_input_type(X)\n X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,\n estimator=self, dtype=FLOAT_DTYPES,\n force_all_finite='allow-nan')\n\n if sparse.issparse(X):\n inplace_column_scale(X, self.scale_)\n else:\n X *= self.scale_\n\n X = to_output_type(X, output_type)\n return X\n\n def _more_tags(self):\n return {'allow_nan': True}\n\n\n@check_cupy8()\n@_deprecate_positional_args\ndef maxabs_scale(X, *, axis=0, copy=True):\n \"\"\"Scale each feature to the [-1, 1] range without breaking the 
sparsity.\n\n This estimator scales each feature individually such\n that the maximal absolute value of each feature in the\n training set will be 1.0.\n\n This scaler can also be applied to sparse CSR or CSC matrices.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n The data.\n\n axis : int (0 by default)\n axis used to scale along. If 0, independently scale each feature,\n otherwise (if 1) scale each sample.\n\n copy : boolean, optional, default is True\n Whether a forced copy will be triggered. If copy=False, a copy might\n be triggered by a conversion.\n\n See also\n --------\n MaxAbsScaler: Performs scaling to the [-1, 1] range using the``Transformer`` API\n\n Notes\n -----\n NaNs are treated as missing values: disregarded to compute the statistics,\n and maintained during the data transformation.\n \"\"\" # noqa\n # Unlike the scaler object, this function allows 1d input.\n\n # If copy is required, it will be done inside the scaler object.\n X = check_array(X, accept_sparse=('csr', 'csc'), copy=False,\n ensure_2d=False, dtype=FLOAT_DTYPES,\n force_all_finite='allow-nan')\n original_ndim = X.ndim\n\n if original_ndim == 1:\n X = X.reshape(X.shape[0], 1)\n\n s = MaxAbsScaler(copy=copy)\n if axis == 0:\n X = s.fit_transform(X)\n else:\n X = s.fit_transform(X.T).T\n\n if original_ndim == 1:\n X = X.ravel()\n\n return X\n\n\nclass RobustScaler(TransformerMixin, BaseEstimator):\n \"\"\"Scale features using statistics that are robust to outliers.\n\n This Scaler removes the median and scales the data according to the\n quantile range (defaults to IQR: Interquartile Range). The IQR is the range\n between the 1st quartile (25th quantile) and the 3rd quartile (75th\n quantile).\n\n Centering and scaling happen independently on each feature by computing the\n relevant statistics on the samples in the training set. Median and\n interquartile range are then stored to be used on later data using the\n ``transform`` method.\n\n Standardization of a dataset is a common requirement for many machine\n learning estimators. Typically this is done by removing the mean and\n scaling to unit variance. However, outliers can often influence the sample\n mean / variance in a negative way. In such cases, the median and the\n interquartile range often give better results.\n\n Parameters\n ----------\n\n with_centering : boolean, default=True\n If True, center the data before scaling.\n This will cause ``transform`` to raise an exception when attempted on\n sparse matrices, because centering them entails building a dense\n matrix which in common use cases is likely to be too large to fit in\n memory.\n\n with_scaling : boolean, default=True\n If True, scale the data to interquartile range.\n\n quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0\n Default: (25.0, 75.0) = (1st quantile, 3rd quantile) = IQR\n Quantile range used to calculate ``scale_``.\n\n copy : boolean, optional, default=True\n Whether a forced copy will be triggered. If copy=False, a copy might\n be triggered by a conversion.\n\n Attributes\n ----------\n center_ : array of floats\n The median value for each feature in the training set.\n\n scale_ : array of floats\n The (scaled) interquartile range for each feature in the training set.\n\n Examples\n --------\n >>> from cuml.preprocessing import RobustScaler\n >>> X = [[ 1., -2., 2.],\n ... [ -2., 1., 3.],\n ... 
[ 4., 1., -2.]]\n >>> transformer = RobustScaler().fit(X)\n >>> transformer\n RobustScaler()\n >>> transformer.transform(X)\n array([[ 0. , -2. , 0. ],\n [-1. , 0. , 0.4],\n [ 1. , 0. , -1.6]])\n\n See also\n --------\n\n robust_scale: Equivalent function without the estimator API.\n\n cuml.decomposition.PCA: Further removes the linear correlation across\n features with ``whiten=True``.\n\n \"\"\"\n @_deprecate_positional_args\n def __init__(self, *, with_centering=True, with_scaling=True,\n quantile_range=(25.0, 75.0), copy=True):\n self.with_centering = with_centering\n self.with_scaling = with_scaling\n self.quantile_range = quantile_range\n self.copy = copy\n\n def fit(self, X, y=None):\n \"\"\"Compute the median and quantiles to be used for scaling.\n\n Parameters\n ----------\n X : {array-like, CSC matrix}, shape [n_samples, n_features]\n The data used to compute the median and quantiles\n used for later scaling along the features axis.\n \"\"\"\n # at fit, convert sparse matrices to csc for optimized computation of\n # the quantiles\n X = self._validate_data(X, accept_sparse='csc', estimator=self,\n dtype=FLOAT_DTYPES,\n force_all_finite='allow-nan')\n\n q_min, q_max = self.quantile_range\n if not 0 <= q_min <= q_max <= 100:\n raise ValueError(\"Invalid quantile range: %s\" %\n str(self.quantile_range))\n\n if self.with_centering:\n if sparse.issparse(X):\n raise ValueError(\n \"Cannot center sparse matrices: use `with_centering=False`\"\n \" instead. See docstring for motivation and alternatives.\")\n middle, is_odd = divmod(X.shape[0], 2)\n X_sorted = np.sort(X, axis=0)\n if is_odd:\n self.center_ = X_sorted[middle]\n else:\n elm1 = X_sorted[middle-1]\n elm2 = X_sorted[middle]\n self.center_ = (elm1 + elm2) / 2.\n else:\n self.center_ = None\n\n if self.with_scaling:\n quantiles = []\n for feature_idx in range(X.shape[1]):\n if sparse.issparse(X):\n column_nnz_data = X.data[X.indptr[feature_idx]:\n X.indptr[feature_idx + 1]]\n column_data = np.zeros(shape=X.shape[0], dtype=X.dtype)\n column_data[:len(column_nnz_data)] = column_nnz_data\n else:\n column_data = X[:, feature_idx]\n\n is_not_nan = ~np.isnan(column_data).astype(np.bool)\n column_data = column_data[is_not_nan]\n quantiles.append(np.percentile(column_data,\n self.quantile_range))\n\n quantiles = np.array(quantiles).T\n\n self.scale_ = quantiles[1] - quantiles[0]\n self.scale_ = _handle_zeros_in_scale(self.scale_, copy=False)\n else:\n self.scale_ = None\n\n return self\n\n def transform(self, X):\n \"\"\"Center and scale the data.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}\n The data used to scale along the specified axis.\n \"\"\"\n check_is_fitted(self)\n\n output_type = get_input_type(X)\n X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,\n estimator=self, dtype=FLOAT_DTYPES,\n force_all_finite='allow-nan')\n\n if sparse.issparse(X):\n if self.with_scaling:\n inplace_column_scale(X, 1.0 / self.scale_)\n else:\n if self.with_centering:\n X -= self.center_\n if self.with_scaling:\n X /= self.scale_\n return to_output_type(X, output_type)\n\n def inverse_transform(self, X):\n \"\"\"Scale back the data to the original representation\n\n Parameters\n ----------\n X : {array-like, sparse matrix}\n The data used to scale along the specified axis.\n \"\"\"\n check_is_fitted(self)\n\n output_type = get_input_type(X)\n X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,\n estimator=self, dtype=FLOAT_DTYPES,\n force_all_finite='allow-nan')\n\n if sparse.issparse(X):\n if 
self.with_scaling:\n inplace_column_scale(X, self.scale_)\n else:\n if self.with_scaling:\n X *= self.scale_\n if self.with_centering:\n X += self.center_\n return to_output_type(X, output_type)\n\n def _more_tags(self):\n return {'allow_nan': True}\n\n\n@_deprecate_positional_args\ndef robust_scale(X, *, axis=0, with_centering=True, with_scaling=True,\n quantile_range=(25.0, 75.0), copy=True):\n \"\"\"\n Standardize a dataset along any axis\n\n Center to the median and component wise scale\n according to the interquartile range.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}\n The data to center and scale.\n\n axis : int (0 by default)\n axis used to compute the medians and IQR along. If 0,\n independently scale each feature, otherwise (if 1) scale\n each sample.\n\n with_centering : boolean, True by default\n If True, center the data before scaling.\n\n with_scaling : boolean, True by default\n If True, scale the data to unit variance (or equivalently,\n unit standard deviation).\n\n quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0\n Default: (25.0, 75.0) = (1st quantile, 3rd quantile) = IQR\n Quantile range used to calculate ``scale_``.\n\n copy : boolean, optional, default is True\n Whether a forced copy will be triggered. If copy=False, a copy might\n be triggered by a conversion.\n\n Notes\n -----\n This implementation will refuse to center sparse matrices\n since it would make them non-sparse and would potentially crash the\n program with memory exhaustion problems.\n\n Instead the caller is expected to either set explicitly\n `with_centering=False` (in that case, only variance scaling will be\n performed on the features of the CSR matrix) or to densify the matrix\n if he/she expects the materialized dense array to fit in memory.\n\n To avoid memory copy the caller should pass a CSR matrix.\n\n See also\n --------\n RobustScaler: Performs centering and scaling using the ``Transformer`` API\n\n \"\"\"\n output_type = get_input_type(X)\n X = check_array(X, accept_sparse=('csr', 'csc'), copy=False,\n ensure_2d=False, dtype=FLOAT_DTYPES,\n force_all_finite='allow-nan')\n original_ndim = X.ndim\n\n if original_ndim == 1:\n X = X.reshape(X.shape[0], 1)\n\n s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,\n quantile_range=quantile_range, copy=copy)\n if axis == 0:\n X = s.fit_transform(X)\n else:\n X = s.fit_transform(X.T).T\n\n if original_ndim == 1:\n X = X.ravel()\n\n return to_output_type(X, output_type)\n\n\nclass PolynomialFeatures(TransformerMixin, BaseEstimator):\n \"\"\"Generate polynomial and interaction features.\n\n Generate a new feature matrix consisting of all polynomial combinations\n of the features with degree less than or equal to the specified degree.\n For example, if an input sample is two dimensional and of the form\n [a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].\n\n Parameters\n ----------\n degree : integer\n The degree of the polynomial features. Default = 2.\n\n interaction_only : boolean, default = False\n If true, only interaction features are produced: features that are\n products of at most ``degree`` *distinct* input features (so not\n ``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).\n\n include_bias : boolean\n If True (default), then include a bias column, the feature in which\n all polynomial powers are zero (i.e. a column of ones - acts as an\n intercept term in a linear model).\n\n order : str in {'C', 'F'}, default 'C'\n Order of output array in the dense case. 
'F' order is faster to\n compute, but may slow down subsequent estimators.\n\n Examples\n --------\n >>> import numpy as np\n >>> from cuml.preprocessing import PolynomialFeatures\n >>> X = np.arange(6).reshape(3, 2)\n >>> X\n array([[0, 1],\n [2, 3],\n [4, 5]])\n >>> poly = PolynomialFeatures(2)\n >>> poly.fit_transform(X)\n array([[ 1., 0., 1., 0., 0., 1.],\n [ 1., 2., 3., 4., 6., 9.],\n [ 1., 4., 5., 16., 20., 25.]])\n >>> poly = PolynomialFeatures(interaction_only=True)\n >>> poly.fit_transform(X)\n array([[ 1., 0., 1., 0.],\n [ 1., 2., 3., 6.],\n [ 1., 4., 5., 20.]])\n\n Attributes\n ----------\n powers_ : array, shape (n_output_features, n_input_features)\n powers_[i, j] is the exponent of the jth input in the ith output.\n\n n_input_features_ : int\n The total number of input features.\n\n n_output_features_ : int\n The total number of polynomial output features. The number of output\n features is computed by iterating over all suitably sized combinations\n of input features.\n\n Notes\n -----\n Be aware that the number of features in the output array scales\n polynomially in the number of features of the input array, and\n exponentially in the degree. High degrees can cause overfitting.\n \"\"\"\n @check_cupy8()\n @_deprecate_positional_args\n def __init__(self, degree=2, *, interaction_only=False, include_bias=True,\n order='C'):\n self.degree = degree\n self.interaction_only = interaction_only\n self.include_bias = include_bias\n self.order = order\n\n @staticmethod\n @check_cupy8()\n def _combinations(n_features, degree, interaction_only, include_bias):\n comb = (combinations if interaction_only else combinations_w_r)\n start = int(not include_bias)\n return chain.from_iterable(comb(range(n_features), i)\n for i in range(start, degree + 1))\n\n @property\n def powers_(self):\n check_is_fitted(self)\n\n combinations = self._combinations(self.n_input_features_, self.degree,\n self.interaction_only,\n self.include_bias)\n return np.vstack([np.bincount(c, minlength=self.n_input_features_)\n for c in combinations])\n\n def get_feature_names(self, input_features=None):\n \"\"\"\n Return feature names for output features\n\n Parameters\n ----------\n input_features : list of string, length n_features, optional\n String names for input features if available. By default,\n \"x0\", \"x1\", ... 
\"xn_features\" is used.\n\n Returns\n -------\n output_feature_names : list of string, length n_output_features\n\n \"\"\"\n powers = self.powers_\n if input_features is None:\n input_features = ['x%d' % i for i in range(powers.shape[1])]\n feature_names = []\n for row in powers:\n inds = np.where(row)[0]\n if len(inds):\n name = \" \".join(\"%s^%d\" % (input_features[ind], exp)\n if exp != 1 else input_features[ind]\n for ind, exp in zip(inds, row[inds]))\n else:\n name = \"1\"\n feature_names.append(name)\n return feature_names\n\n def fit(self, X, y=None):\n \"\"\"\n Compute number of output features.\n\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n The data.\n\n Returns\n -------\n self : instance\n \"\"\"\n n_samples, n_features = self._validate_data(\n X, accept_sparse=True).shape\n combinations = self._combinations(n_features, self.degree,\n self.interaction_only,\n self.include_bias)\n self.n_input_features_ = n_features\n self.n_output_features_ = sum(1 for _ in combinations)\n return self\n\n def transform(self, X):\n \"\"\"Transform data to polynomial features\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape [n_samples, n_features]\n The data to transform, row by row.\n\n Prefer CSR over CSC for sparse input (for speed), but CSC is\n required if the degree is 4 or higher. If the degree is less than\n 4 and the input format is CSC, it will be converted to CSR, have\n its polynomial features generated, then converted back to CSC.\n\n If the degree is 2 or 3, the method described in \"Leveraging\n Sparsity to Speed Up Polynomial Feature Expansions of CSR Matrices\n Using K-Simplex Numbers\" by Andrew Nystrom and John Hughes is\n used, which is much faster than the method used on CSC input. 
For\n this reason, a CSC input will be converted to CSR, and the output\n will be converted back to CSC prior to being returned, hence the\n preference of CSR.\n\n Returns\n -------\n XP : {array-like, sparse matrix}, shape [n_samples, NP]\n The matrix of features, where NP is the number of polynomial\n features generated from the combination of inputs.\n \"\"\"\n check_is_fitted(self)\n\n output_type = get_input_type(X)\n X = check_array(X, order='F', dtype=FLOAT_DTYPES,\n accept_sparse=('csr', 'csc'))\n\n n_samples, n_features = X.shape\n\n if n_features != self.n_input_features_:\n raise ValueError(\"X shape does not match training shape\")\n\n if sparse.isspmatrix_csr(X):\n if self.degree > 3:\n res = self.transform(X.tocsc()).tocsr()\n return to_output_type(res, output_type, order=self.order)\n to_stack = []\n if self.include_bias:\n bias = np.ones(shape=(n_samples, 1), dtype=X.dtype)\n to_stack.append(sparse.csr_matrix(bias))\n to_stack.append(X)\n for deg in range(2, self.degree+1):\n Xp_next = csr_polynomial_expansion(X, self.interaction_only,\n deg)\n if Xp_next is None:\n break\n to_stack.append(Xp_next)\n XP = sparse.hstack(to_stack, format='csr')\n elif sparse.isspmatrix_csc(X) and self.degree < 4:\n res = self.transform(X.tocsr()).tocsc()\n return to_output_type(res, output_type, order=self.order)\n else:\n if sparse.isspmatrix(X):\n combinations = self._combinations(n_features, self.degree,\n self.interaction_only,\n self.include_bias)\n columns = []\n for comb in combinations:\n if comb:\n out_col = 1\n for col_idx in comb:\n out_col = X[:, col_idx].multiply(out_col)\n columns.append(out_col)\n else:\n bias = sparse.csc_matrix(np.ones((X.shape[0], 1)))\n columns.append(bias)\n XP = sparse.hstack(columns, dtype=X.dtype).tocsc()\n else:\n XP = np.empty((n_samples, self.n_output_features_),\n dtype=X.dtype, order=self.order)\n\n # What follows is a faster implementation of:\n # for i, comb in enumerate(combinations):\n # XP[:, i] = X[:, comb].prod(1)\n # This implementation uses two optimisations.\n # First one is broadcasting,\n # multiply ([X1, ..., Xn], X1) -> [X1 X1, ..., Xn X1]\n # multiply ([X2, ..., Xn], X2) -> [X2 X2, ..., Xn X2]\n # ...\n # multiply ([X[:, start:end], X[:, start]) -> ...\n # Second optimisation happens for degrees >= 3.\n # Xi^3 is computed reusing previous computation:\n # Xi^3 = Xi^2 * Xi.\n\n if self.include_bias:\n XP[:, 0] = 1\n current_col = 1\n else:\n current_col = 0\n\n # d = 0\n XP[:, current_col:current_col + n_features] = X\n index = list(range(current_col,\n current_col + n_features))\n current_col += n_features\n index.append(current_col)\n\n # d >= 1\n for _ in range(1, self.degree):\n new_index = []\n end = index[-1]\n for feature_idx in range(n_features):\n start = index[feature_idx]\n new_index.append(current_col)\n if self.interaction_only:\n start += (index[feature_idx + 1] -\n index[feature_idx])\n next_col = current_col + end - start\n if next_col <= current_col:\n break\n # XP[:, start:end] are terms of degree d - 1\n # that exclude feature #feature_idx.\n np.multiply(XP[:, start:end],\n X[:, feature_idx:feature_idx + 1],\n out=XP[:, current_col:next_col],\n casting='no')\n current_col = next_col\n\n new_index.append(current_col)\n index = new_index\n\n XP = to_output_type(XP, output_type, order=self.order)\n return XP\n\n\n@check_cupy8()\n@_deprecate_positional_args\ndef normalize(X, norm='l2', *, axis=1, copy=True, return_norm=False):\n \"\"\"Scale input vectors individually to unit norm (vector length).\n\n Parameters\n 
----------\n X : {array-like, sparse matrix}, shape [n_samples, n_features]\n The data to normalize, element by element.\n Please provide CSC matrix to normalize on axis 0,\n conversely provide CSR matrix to normalize on axis 1\n\n norm : 'l1', 'l2', or 'max', optional ('l2' by default)\n The norm to use to normalize each non zero sample (or each non-zero\n feature if axis is 0).\n\n axis : 0 or 1, optional (1 by default)\n axis used to normalize the data along. If 1, independently normalize\n each sample, otherwise (if 0) normalize each feature.\n\n copy : boolean, optional, default True\n Whether a forced copy will be triggered. If copy=False, a copy might\n be triggered by a conversion.\n\n return_norm : boolean, default False\n whether to return the computed norms\n\n Returns\n -------\n X : {array-like, sparse matrix}, shape [n_samples, n_features]\n Normalized input X.\n\n norms : array, shape [n_samples] if axis=1 else [n_features]\n An array of norms along given axis for X.\n When X is sparse, a NotImplementedError will be raised\n for norm 'l1' or 'l2'.\n\n See also\n --------\n Normalizer: Performs normalization using the ``Transformer`` API\n \"\"\"\n if norm not in ('l1', 'l2', 'max'):\n raise ValueError(\"'%s' is not a supported norm\" % norm)\n\n if axis == 0:\n sparse_format = 'csc'\n elif axis == 1:\n sparse_format = 'csr'\n else:\n raise ValueError(\"'%d' is not a supported axis\" % axis)\n\n output_type = get_input_type(X)\n X = check_array(X, accept_sparse=sparse_format, copy=copy,\n estimator='the normalize function', dtype=FLOAT_DTYPES)\n\n if axis == 0:\n X = X.T\n\n if sparse.issparse(X):\n if return_norm and norm in ('l1', 'l2'):\n raise NotImplementedError(\"return_norm=True is not implemented \"\n \"for sparse matrices with norm 'l1' \"\n \"or norm 'l2'\")\n if norm == 'l1':\n inplace_csr_row_normalize_l1(X)\n elif norm == 'l2':\n inplace_csr_row_normalize_l2(X)\n elif norm == 'max':\n mins, maxes = min_max_axis(X, 1)\n norms = np.maximum(abs(mins), maxes)\n norms_elementwise = norms.repeat(np.diff(X.indptr).tolist())\n mask = norms_elementwise != 0\n X.data[mask] /= norms_elementwise[mask]\n else:\n if norm == 'l1':\n norms = np.abs(X).sum(axis=1)\n elif norm == 'l2':\n norms = row_norms(X)\n elif norm == 'max':\n norms = np.max(abs(X), axis=1)\n norms = _handle_zeros_in_scale(norms, copy=False)\n X /= norms[:, np.newaxis]\n\n if axis == 0:\n X = X.T\n\n X = to_output_type(X, output_type)\n if return_norm:\n if output_type in {'dataframe', 'series'}:\n norms = to_output_type(norms, 'cudf')\n else:\n norms = to_output_type(norms, output_type)\n return X, norms\n else:\n return X\n\n\nclass Normalizer(TransformerMixin, BaseEstimator):\n \"\"\"Normalize samples individually to unit norm.\n\n Each sample (i.e. each row of the data matrix) with at least one\n non zero component is rescaled independently of other samples so\n that its norm (l1, l2 or inf) equals one.\n\n This transformer is able to work both with dense numpy arrays and\n sparse matrix\n\n Scaling inputs to unit norms is a common operation for text\n classification or clustering for instance. For instance the dot\n product of two l2-normalized TF-IDF vectors is the cosine similarity\n of the vectors and is the base similarity metric for the Vector\n Space Model commonly used by the Information Retrieval community.\n\n Parameters\n ----------\n norm : 'l1', 'l2', or 'max', optional ('l2' by default)\n The norm to use to normalize each non zero sample. 
If norm='max'\n is used, values will be rescaled by the maximum of the absolute\n values.\n\n copy : boolean, optional, default True\n Whether a forced copy will be triggered. If copy=False, a copy might\n be triggered by a conversion.\n\n Examples\n --------\n >>> from cuml.preprocessing import Normalizer\n >>> X = [[4, 1, 2, 2],\n ... [1, 3, 9, 3],\n ... [5, 7, 5, 1]]\n >>> transformer = Normalizer().fit(X) # fit does nothing.\n >>> transformer\n Normalizer()\n >>> transformer.transform(X)\n array([[0.8, 0.2, 0.4, 0.4],\n [0.1, 0.3, 0.9, 0.3],\n [0.5, 0.7, 0.5, 0.1]])\n\n Notes\n -----\n This estimator is stateless (besides constructor parameters), the\n fit method does nothing but is useful when used in a pipeline.\n\n\n See also\n --------\n normalize: Equivalent function without the estimator API.\n \"\"\"\n\n @check_cupy8()\n @_deprecate_positional_args\n def __init__(self, norm='l2', *, copy=True):\n self.norm = norm\n self.copy = copy\n\n def fit(self, X, y=None):\n \"\"\"Do nothing and return the estimator unchanged\n\n This method is just there to implement the usual API and hence\n work in pipelines.\n\n Parameters\n ----------\n X : {array-like, CSR matrix}\n \"\"\"\n self._validate_data(X, accept_sparse='csr')\n return self\n\n def transform(self, X, copy=None):\n \"\"\"Scale each non zero row of X to unit norm\n\n Parameters\n ----------\n X : {array-like, CSR matrix}, shape [n_samples, n_features]\n The data to normalize, row by row.\n copy : bool, optional (default: None)\n Whether a forced copy will be triggered. If copy=False,\n a copy might be triggered by a conversion.\n \"\"\"\n output_type = get_input_type(X)\n copy = copy if copy is not None else self.copy\n X = check_array(X, accept_sparse='csr')\n X = normalize(X, norm=self.norm, axis=1, copy=copy)\n return to_output_type(X, output_type)\n\n def _more_tags(self):\n return {'stateless': True}\n\n\n@_deprecate_positional_args\ndef binarize(X, *, threshold=0.0, copy=True):\n \"\"\"Boolean thresholding of array-like or sparse matrix\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape [n_samples, n_features]\n The data to binarize, element by element.\n\n threshold : float, optional (0.0 by default)\n Feature values below or equal to this are replaced by 0, above it by 1.\n Threshold may not be less than 0 for operations on sparse matrices.\n\n copy : boolean, optional, default True\n Whether a forced copy will be triggered. If copy=False, a copy might\n be triggered by a conversion.\n\n See also\n --------\n Binarizer: Performs binarization using the ``Transformer`` API\n \"\"\"\n output_type = get_input_type(X)\n X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)\n if sparse.issparse(X):\n if threshold < 0:\n raise ValueError('Cannot binarize a sparse matrix with threshold '\n '< 0')\n cond = X.data > threshold\n not_cond = np.logical_not(cond)\n X.data[cond] = 1\n X.data[not_cond] = 0\n X.eliminate_zeros()\n else:\n cond = X > threshold\n not_cond = np.logical_not(cond)\n X[cond] = 1\n X[not_cond] = 0\n return to_output_type(X, output_type)\n\n\nclass Binarizer(TransformerMixin, BaseEstimator):\n \"\"\"Binarize data (set feature values to 0 or 1) according to a threshold\n\n Values greater than the threshold map to 1, while values less than\n or equal to the threshold map to 0. 
With the default threshold of 0,\n only positive values map to 1.\n\n Binarization is a common operation on text count data where the\n analyst can decide to only consider the presence or absence of a\n feature rather than a quantified number of occurrences for instance.\n\n It can also be used as a pre-processing step for estimators that\n consider boolean random variables (e.g. modelled using the Bernoulli\n distribution in a Bayesian setting).\n\n Parameters\n ----------\n threshold : float, optional (0.0 by default)\n Feature values below or equal to this are replaced by 0, above it by 1.\n Threshold may not be less than 0 for operations on sparse matrices.\n\n copy : boolean, optional, default True\n Whether a forced copy will be triggered. If copy=False, a copy might\n be triggered by a conversion.\n\n Examples\n --------\n >>> from cuml.preprocessing import Binarizer\n >>> X = [[ 1., -1., 2.],\n ... [ 2., 0., 0.],\n ... [ 0., 1., -1.]]\n >>> transformer = Binarizer().fit(X) # fit does nothing.\n >>> transformer\n Binarizer()\n >>> transformer.transform(X)\n array([[1., 0., 1.],\n [1., 0., 0.],\n [0., 1., 0.]])\n\n Notes\n -----\n If the input is a sparse matrix, only the non-zero values are subject\n to update by the Binarizer class.\n\n This estimator is stateless (besides constructor parameters), the\n fit method does nothing but is useful when used in a pipeline.\n\n See also\n --------\n binarize: Equivalent function without the estimator API.\n \"\"\"\n\n @_deprecate_positional_args\n def __init__(self, *, threshold=0.0, copy=True):\n self.threshold = threshold\n self.copy = copy\n\n def fit(self, X, y=None):\n \"\"\"Do nothing and return the estimator unchanged\n\n This method is just there to implement the usual API and hence\n work in pipelines.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}\n \"\"\"\n self._validate_data(X, accept_sparse=['csr', 'csc'])\n return self\n\n def transform(self, X, copy=None):\n \"\"\"Binarize each element of X\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape [n_samples, n_features]\n The data to binarize, element by element.\n\n copy : bool\n Whether a forced copy will be triggered. If copy=False,\n a copy might be triggered by a conversion.\n \"\"\"\n copy = copy if copy is not None else self.copy\n return binarize(X, threshold=self.threshold, copy=copy)\n\n def _more_tags(self):\n return {'stateless': True}\n\n\nclass KernelCenterer(TransformerMixin, BaseEstimator):\n \"\"\"Center a kernel matrix\n\n Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a\n function mapping x to a Hilbert space. KernelCenterer centers (i.e.,\n normalize to have zero mean) the data without explicitly computing phi(x).\n It is equivalent to centering phi(x) with\n sklearn.preprocessing.StandardScaler(with_std=False).\n\n Read more in the :ref:`User Guide <kernel_centering>`.\n\n Attributes\n ----------\n K_fit_rows_ : array, shape (n_samples,)\n Average of each column of kernel matrix\n\n K_fit_all_ : float\n Average of kernel matrix\n\n Examples\n --------\n >>> from sklearn.preprocessing import KernelCenterer\n >>> from sklearn.metrics.pairwise import pairwise_kernels\n >>> X = [[ 1., -2., 2.],\n ... [ -2., 1., 3.],\n ... 
[ 4., 1., -2.]]\n >>> K = pairwise_kernels(X, metric='linear')\n >>> K\n array([[ 9., 2., -2.],\n [ 2., 14., -13.],\n [ -2., -13., 21.]])\n >>> transformer = KernelCenterer().fit(K)\n >>> transformer\n KernelCenterer()\n >>> transformer.transform(K)\n array([[ 5., 0., -5.],\n [ 0., 14., -14.],\n [ -5., -14., 19.]])\n \"\"\"\n\n def __init__(self):\n # Needed for backported inspect.signature compatibility with PyPy\n pass\n\n def fit(self, K, y=None):\n \"\"\"Fit KernelCenterer\n\n Parameters\n ----------\n K : numpy array of shape [n_samples, n_samples]\n Kernel matrix.\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n\n K = self._validate_data(K, dtype=FLOAT_DTYPES)\n\n if K.shape[0] != K.shape[1]:\n raise ValueError(\"Kernel matrix must be a square matrix.\"\n \" Input is a {}x{} matrix.\"\n .format(K.shape[0], K.shape[1]))\n\n n_samples = K.shape[0]\n self.K_fit_rows_ = np.sum(K, axis=0) / n_samples\n self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples\n return self\n\n def transform(self, K, copy=True):\n \"\"\"Center kernel matrix.\n\n Parameters\n ----------\n K : numpy array of shape [n_samples1, n_samples2]\n Kernel matrix.\n\n copy : boolean, optional, default True\n Whether a forced copy will be triggered. If copy=False,\n a copy might be triggered by a conversion.\n\n Returns\n -------\n K_new : numpy array of shape [n_samples1, n_samples2]\n \"\"\"\n check_is_fitted(self)\n\n K = check_array(K, copy=copy, dtype=FLOAT_DTYPES)\n\n K_pred_cols = (np.sum(K, axis=1) /\n self.K_fit_rows_.shape[0])[:, np.newaxis]\n\n K -= self.K_fit_rows_\n K -= K_pred_cols\n K += self.K_fit_all_\n\n return K\n\n @property\n def _pairwise(self):\n return True\n\n\ndef add_dummy_feature(X, value=1.0):\n \"\"\"Augment dataset with an additional dummy feature.\n\n This is useful for fitting an intercept term with implementations which\n cannot otherwise fit it directly.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape [n_samples, n_features]\n Data.\n\n value : float\n Value to use for the dummy feature.\n\n Returns\n -------\n\n X : {array, sparse matrix}, shape [n_samples, n_features + 1]\n Same data with dummy feature added as first column.\n\n Examples\n --------\n\n >>> from cuml.preprocessing import add_dummy_feature\n >>> add_dummy_feature([[0, 1], [1, 0]])\n array([[1., 0., 1.],\n [1., 1., 0.]])\n \"\"\"\n output_type = get_input_type(X)\n X = check_array(X, accept_sparse=['csc', 'csr', 'coo'], dtype=FLOAT_DTYPES)\n n_samples, n_features = X.shape\n shape = (n_samples, n_features + 1)\n if sparse.issparse(X):\n if sparse.isspmatrix_coo(X):\n # Shift columns to the right.\n col = X.col + 1\n # Column indices of dummy feature are 0 everywhere.\n col = np.concatenate((np.zeros(n_samples), col))\n # Row indices of dummy feature are 0, ..., n_samples-1.\n row = np.concatenate((np.arange(n_samples), X.row))\n # Prepend the dummy feature n_samples times.\n data = np.concatenate((np.full(n_samples, value), X.data))\n X = sparse.coo_matrix((data, (row, col)), shape)\n return to_output_type(X, output_type)\n elif sparse.isspmatrix_csc(X):\n # Shift index pointers since we need to add n_samples elements.\n indptr = X.indptr + n_samples\n # indptr[0] must be 0.\n indptr = np.concatenate((np.array([0]), indptr))\n # Row indices of dummy feature are 0, ..., n_samples-1.\n indices = np.concatenate((np.arange(n_samples), X.indices))\n # Prepend the dummy feature n_samples times.\n data = np.concatenate((np.full(n_samples, value), X.data))\n X = sparse.csc_matrix((data, 
indices, indptr), shape)\n return to_output_type(X, output_type)\n else:\n klass = X.__class__\n X = klass(add_dummy_feature(X.tocoo(), value))\n return to_output_type(X, output_type)\n else:\n X = np.hstack((np.full((n_samples, 1), value), X))\n return to_output_type(X, output_type)\n\n\nclass QuantileTransformer(TransformerMixin, BaseEstimator):\n \"\"\"Transform features using quantiles information.\n\n This method transforms the features to follow a uniform or a normal\n distribution. Therefore, for a given feature, this transformation tends\n to spread out the most frequent values. It also reduces the impact of\n (marginal) outliers: this is therefore a robust preprocessing scheme.\n\n The transformation is applied on each feature independently. First an\n estimate of the cumulative distribution function of a feature is\n used to map the original values to a uniform distribution. The obtained\n values are then mapped to the desired output distribution using the\n associated quantile function. Features values of new/unseen data that fall\n below or above the fitted range will be mapped to the bounds of the output\n distribution. Note that this transform is non-linear. It may distort linear\n correlations between variables measured at the same scale but renders\n variables measured at different scales more directly comparable.\n\n Read more in the :ref:`User Guide <preprocessing_transformer>`.\n\n .. versionadded:: 0.19\n\n Parameters\n ----------\n n_quantiles : int, optional (default=1000 or n_samples)\n Number of quantiles to be computed. It corresponds to the number\n of landmarks used to discretize the cumulative distribution function.\n If n_quantiles is larger than the number of samples, n_quantiles is set\n to the number of samples as a larger number of quantiles does not give\n a better approximation of the cumulative distribution function\n estimator.\n\n output_distribution : str, optional (default='uniform')\n Marginal distribution for the transformed data. The choices are\n 'uniform' (default) or 'normal'.\n\n ignore_implicit_zeros : bool, optional (default=False)\n Only applies to sparse matrices. If True, the sparse entries of the\n matrix are discarded to compute the quantile statistics. If False,\n these entries are treated as zeros.\n\n subsample : int, optional (default=1e5)\n Maximum number of samples used to estimate the quantiles for\n computational efficiency. 
Note that the subsampling procedure may\n differ for value-identical sparse and dense matrices.\n\n random_state : int, RandomState instance or None, optional (default=None)\n Determines random number generation for subsampling and smoothing\n noise.\n Please see ``subsample`` for more details.\n Pass an int for reproducible results across multiple function calls.\n See :term:`Glossary <random_state>`\n\n copy : boolean, optional, (default=True)\n Set to False to perform inplace transformation and avoid a copy (if the\n input is already a numpy array).\n\n Attributes\n ----------\n n_quantiles_ : integer\n The actual number of quantiles used to discretize the cumulative\n distribution function.\n\n quantiles_ : ndarray, shape (n_quantiles, n_features)\n The values corresponding the quantiles of reference.\n\n references_ : ndarray, shape(n_quantiles, )\n Quantiles of references.\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn.preprocessing import QuantileTransformer\n >>> rng = np.random.RandomState(0)\n >>> X = np.sort(rng.normal(loc=0.5, scale=0.25, size=(25, 1)), axis=0)\n >>> qt = QuantileTransformer(n_quantiles=10, random_state=0)\n >>> qt.fit_transform(X)\n array([...])\n\n See also\n --------\n quantile_transform : Equivalent function without the estimator API.\n PowerTransformer : Perform mapping to a normal distribution using a power\n transform.\n StandardScaler : Perform standardization that is faster, but less robust\n to outliers.\n RobustScaler : Perform robust standardization that removes the influence\n of outliers but does not put outliers and inliers on the same scale.\n\n Notes\n -----\n NaNs are treated as missing values: disregarded in fit, and maintained in\n transform.\n\n For a comparison of the different scalers, transformers, and normalizers,\n see :ref:`examples/preprocessing/plot_all_scaling.py\n <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.\n \"\"\"\n\n @_deprecate_positional_args\n def __init__(self, *, n_quantiles=1000, output_distribution='uniform',\n ignore_implicit_zeros=False, subsample=int(1e5),\n random_state=None, copy=True):\n self.n_quantiles = n_quantiles\n self.output_distribution = output_distribution\n self.ignore_implicit_zeros = ignore_implicit_zeros\n self.subsample = subsample\n self.random_state = random_state\n self.copy = copy\n\n def _dense_fit(self, X, random_state):\n \"\"\"Compute percentiles for dense matrices.\n\n Parameters\n ----------\n X : ndarray, shape (n_samples, n_features)\n The data used to scale along the features axis.\n \"\"\"\n if self.ignore_implicit_zeros:\n warnings.warn(\"'ignore_implicit_zeros' takes effect only with\"\n \" sparse matrix. 
This parameter has no effect.\")\n\n n_samples, n_features = X.shape\n references = self.references_ * 100\n\n self.quantiles_ = []\n for col in X.T:\n if self.subsample < n_samples:\n subsample_idx = random_state.choice(n_samples,\n size=self.subsample,\n replace=False)\n col = col.take(subsample_idx, mode='clip')\n self.quantiles_.append(np.nanpercentile(col, references))\n self.quantiles_ = np.transpose(self.quantiles_)\n # Due to floating-point precision error in `np.nanpercentile`,\n # make sure that quantiles are monotonically increasing.\n # Upstream issue in numpy:\n # https://github.com/numpy/numpy/issues/14685\n self.quantiles_ = np.maximum.accumulate(self.quantiles_)\n\n def _sparse_fit(self, X, random_state):\n \"\"\"Compute percentiles for sparse matrices.\n\n Parameters\n ----------\n X : sparse matrix CSC, shape (n_samples, n_features)\n The data used to scale along the features axis. The sparse matrix\n needs to be nonnegative.\n \"\"\"\n n_samples, n_features = X.shape\n references = self.references_ * 100\n\n self.quantiles_ = []\n for feature_idx in range(n_features):\n column_nnz_data = X.data[X.indptr[feature_idx]:\n X.indptr[feature_idx + 1]]\n if len(column_nnz_data) > self.subsample:\n column_subsample = (self.subsample * len(column_nnz_data) //\n n_samples)\n if self.ignore_implicit_zeros:\n column_data = np.zeros(shape=column_subsample,\n dtype=X.dtype)\n else:\n column_data = np.zeros(shape=self.subsample, dtype=X.dtype)\n column_data[:column_subsample] = random_state.choice(\n column_nnz_data, size=column_subsample, replace=False)\n else:\n if self.ignore_implicit_zeros:\n column_data = np.zeros(shape=len(column_nnz_data),\n dtype=X.dtype)\n else:\n column_data = np.zeros(shape=n_samples, dtype=X.dtype)\n column_data[:len(column_nnz_data)] = column_nnz_data\n\n if not column_data.size:\n # if no nnz, an error will be raised for computing the\n # quantiles. Force the quantiles to be zeros.\n self.quantiles_.append([0] * len(references))\n else:\n self.quantiles_.append(\n np.nanpercentile(column_data, references))\n self.quantiles_ = np.transpose(self.quantiles_)\n # due to floating-point precision error in `np.nanpercentile`,\n # make sure the quantiles are monotonically increasing\n # Upstream issue in numpy:\n # https://github.com/numpy/numpy/issues/14685\n self.quantiles_ = np.maximum.accumulate(self.quantiles_)\n\n def fit(self, X, y=None):\n \"\"\"Compute the quantiles used for transforming.\n\n Parameters\n ----------\n X : ndarray or sparse matrix, shape (n_samples, n_features)\n The data used to scale along the features axis. If a sparse\n matrix is provided, it will be converted into a sparse\n ``csc_matrix``. Additionally, the sparse matrix needs to be\n nonnegative if `ignore_implicit_zeros` is False.\n\n Returns\n -------\n self : object\n \"\"\"\n if self.n_quantiles <= 0:\n raise ValueError(\"Invalid value for 'n_quantiles': %d. \"\n \"The number of quantiles must be at least one.\"\n % self.n_quantiles)\n\n if self.subsample <= 0:\n raise ValueError(\"Invalid value for 'subsample': %d. \"\n \"The number of subsamples must be at least one.\"\n % self.subsample)\n\n if self.n_quantiles > self.subsample:\n raise ValueError(\"The number of quantiles cannot be greater than\"\n \" the number of samples used. 
Got {} quantiles\"\n \" and {} samples.\".format(self.n_quantiles,\n self.subsample))\n\n X = self._check_inputs(X, in_fit=True, copy=False)\n n_samples = X.shape[0]\n\n if self.n_quantiles > n_samples:\n warnings.warn(\"n_quantiles (%s) is greater than the total number \"\n \"of samples (%s). n_quantiles is set to \"\n \"n_samples.\"\n % (self.n_quantiles, n_samples))\n self.n_quantiles_ = max(1, min(self.n_quantiles, n_samples))\n\n rng = check_random_state(self.random_state)\n\n # Create the quantiles of reference\n self.references_ = np.linspace(0, 1, self.n_quantiles_,\n endpoint=True)\n if sparse.issparse(X):\n self._sparse_fit(X, rng)\n else:\n self._dense_fit(X, rng)\n\n return self\n\n def _transform_col(self, X_col, quantiles, inverse):\n \"\"\"Private function to transform a single feature\"\"\"\n\n output_distribution = self.output_distribution\n\n if not inverse:\n lower_bound_x = quantiles[0]\n upper_bound_x = quantiles[-1]\n lower_bound_y = 0\n upper_bound_y = 1\n else:\n lower_bound_x = 0\n upper_bound_x = 1\n lower_bound_y = quantiles[0]\n upper_bound_y = quantiles[-1]\n # for inverse transform, match a uniform distribution\n with np.errstate(invalid='ignore'): # hide NaN comparison warnings\n if output_distribution == 'normal':\n X_col = stats.norm.cdf(X_col)\n # else output distribution is already a uniform distribution\n\n # find index for lower and higher bounds\n with np.errstate(invalid='ignore'): # hide NaN comparison warnings\n if output_distribution == 'normal':\n lower_bounds_idx = (X_col - BOUNDS_THRESHOLD <\n lower_bound_x)\n upper_bounds_idx = (X_col + BOUNDS_THRESHOLD >\n upper_bound_x)\n if output_distribution == 'uniform':\n lower_bounds_idx = (X_col == lower_bound_x)\n upper_bounds_idx = (X_col == upper_bound_x)\n\n isfinite_mask = ~np.isnan(X_col)\n X_col_finite = X_col[isfinite_mask]\n if not inverse:\n # Interpolate in one direction and in the other and take the\n # mean. This is in case of repeated values in the features\n # and hence repeated quantiles\n #\n # If we don't do this, only one extreme of the duplicated is\n # used (the upper when we do ascending, and the\n # lower for descending). We take the mean of these two\n X_col[isfinite_mask] = .5 * (\n np.interp(X_col_finite, quantiles, self.references_)\n - np.interp(-X_col_finite, -quantiles[::-1],\n -self.references_[::-1]))\n else:\n X_col[isfinite_mask] = np.interp(X_col_finite,\n self.references_, quantiles)\n\n X_col[upper_bounds_idx] = upper_bound_y\n X_col[lower_bounds_idx] = lower_bound_y\n # for forward transform, match the output distribution\n if not inverse:\n with np.errstate(invalid='ignore'): # hide NaN comparison warnings\n if output_distribution == 'normal':\n X_col = stats.norm.ppf(X_col)\n # find the value to clip the data to avoid mapping to\n # infinity. Clip such that the inverse transform will be\n # consistent\n clip_min = stats.norm.ppf(BOUNDS_THRESHOLD - np.spacing(1))\n clip_max = stats.norm.ppf(1 - (BOUNDS_THRESHOLD -\n np.spacing(1)))\n X_col = np.clip(X_col, clip_min, clip_max)\n # else output distribution is uniform and the ppf is the\n # identity function so we let X_col unchanged\n\n return X_col\n\n def _check_inputs(self, X, in_fit, accept_sparse_negative=False,\n copy=False):\n \"\"\"Check inputs before fit and transform\"\"\"\n # In theory reset should be equal to `in_fit`, but there are tests\n # checking the input number of feature and they expect a specific\n # string, which is not the same one raised by check_n_features. 
So we\n # don't check n_features_in_ here for now (it's done with adhoc code in\n # the estimator anyway).\n # TODO: set reset=in_fit when addressing reset in\n # predict/transform/etc.\n reset = True\n\n X = self._validate_data(X, reset=reset,\n accept_sparse='csc', copy=copy,\n dtype=FLOAT_DTYPES,\n force_all_finite='allow-nan')\n # we only accept positive sparse matrix when ignore_implicit_zeros is\n # false and that we call fit or transform.\n with np.errstate(invalid='ignore'): # hide NaN comparison warnings\n if (not accept_sparse_negative and not self.ignore_implicit_zeros\n and (sparse.issparse(X) and np.any(X.data < 0))):\n raise ValueError('QuantileTransformer only accepts'\n ' non-negative sparse matrices.')\n\n # check the output distribution\n if self.output_distribution not in ('normal', 'uniform'):\n raise ValueError(\"'output_distribution' has to be either 'normal'\"\n \" or 'uniform'. Got '{}' instead.\".format(\n self.output_distribution))\n\n return X\n\n def _check_is_fitted(self, X):\n \"\"\"Check the inputs before transforming\"\"\"\n check_is_fitted(self)\n # check that the dimension of X are adequate with the fitted data\n if X.shape[1] != self.quantiles_.shape[1]:\n raise ValueError('X does not have the same number of features as'\n ' the previously fitted data. Got {} instead of'\n ' {}.'.format(X.shape[1],\n self.quantiles_.shape[1]))\n\n def _transform(self, X, inverse=False):\n \"\"\"Forward and inverse transform.\n\n Parameters\n ----------\n X : ndarray, shape (n_samples, n_features)\n The data used to scale along the features axis.\n\n inverse : bool, optional (default=False)\n If False, apply forward transform. If True, apply\n inverse transform.\n\n Returns\n -------\n X : ndarray, shape (n_samples, n_features)\n Projected data\n \"\"\"\n\n if sparse.issparse(X):\n for feature_idx in range(X.shape[1]):\n column_slice = slice(X.indptr[feature_idx],\n X.indptr[feature_idx + 1])\n X.data[column_slice] = self._transform_col(\n X.data[column_slice], self.quantiles_[:, feature_idx],\n inverse)\n else:\n for feature_idx in range(X.shape[1]):\n X[:, feature_idx] = self._transform_col(\n X[:, feature_idx], self.quantiles_[:, feature_idx],\n inverse)\n\n return X\n\n def transform(self, X):\n \"\"\"Feature-wise transformation of the data.\n\n Parameters\n ----------\n X : ndarray or sparse matrix, shape (n_samples, n_features)\n The data used to scale along the features axis. If a sparse\n matrix is provided, it will be converted into a sparse\n ``csc_matrix``. Additionally, the sparse matrix needs to be\n nonnegative if `ignore_implicit_zeros` is False.\n\n Returns\n -------\n Xt : ndarray or sparse matrix, shape (n_samples, n_features)\n The projected data.\n \"\"\"\n X = self._check_inputs(X, in_fit=False, copy=self.copy)\n self._check_is_fitted(X)\n\n return self._transform(X, inverse=False)\n\n def inverse_transform(self, X):\n \"\"\"Back-projection to the original space.\n\n Parameters\n ----------\n X : ndarray or sparse matrix, shape (n_samples, n_features)\n The data used to scale along the features axis. If a sparse\n matrix is provided, it will be converted into a sparse\n ``csc_matrix``. 
Additionally, the sparse matrix needs to be\n nonnegative if `ignore_implicit_zeros` is False.\n\n Returns\n -------\n Xt : ndarray or sparse matrix, shape (n_samples, n_features)\n The projected data.\n \"\"\"\n X = self._check_inputs(X, in_fit=False, accept_sparse_negative=True,\n copy=self.copy)\n self._check_is_fitted(X)\n\n return self._transform(X, inverse=True)\n\n def _more_tags(self):\n return {'allow_nan': True}\n\n\n@_deprecate_positional_args\ndef quantile_transform(X, *, axis=0, n_quantiles=1000,\n output_distribution='uniform',\n ignore_implicit_zeros=False,\n subsample=int(1e5),\n random_state=None,\n copy=True):\n \"\"\"Transform features using quantiles information.\n\n This method transforms the features to follow a uniform or a normal\n distribution. Therefore, for a given feature, this transformation tends\n to spread out the most frequent values. It also reduces the impact of\n (marginal) outliers: this is therefore a robust preprocessing scheme.\n\n The transformation is applied on each feature independently. First an\n estimate of the cumulative distribution function of a feature is\n used to map the original values to a uniform distribution. The obtained\n values are then mapped to the desired output distribution using the\n associated quantile function. Features values of new/unseen data that fall\n below or above the fitted range will be mapped to the bounds of the output\n distribution. Note that this transform is non-linear. It may distort linear\n correlations between variables measured at the same scale but renders\n variables measured at different scales more directly comparable.\n\n Read more in the :ref:`User Guide <preprocessing_transformer>`.\n\n Parameters\n ----------\n X : array-like, sparse matrix\n The data to transform.\n\n axis : int, (default=0)\n Axis used to compute the means and standard deviations along. If 0,\n transform each feature, otherwise (if 1) transform each sample.\n\n n_quantiles : int, optional (default=1000 or n_samples)\n Number of quantiles to be computed. It corresponds to the number\n of landmarks used to discretize the cumulative distribution function.\n If n_quantiles is larger than the number of samples, n_quantiles is set\n to the number of samples as a larger number of quantiles does not give\n a better approximation of the cumulative distribution function\n estimator.\n\n output_distribution : str, optional (default='uniform')\n Marginal distribution for the transformed data. The choices are\n 'uniform' (default) or 'normal'.\n\n ignore_implicit_zeros : bool, optional (default=False)\n Only applies to sparse matrices. If True, the sparse entries of the\n matrix are discarded to compute the quantile statistics. If False,\n these entries are treated as zeros.\n\n subsample : int, optional (default=1e5)\n Maximum number of samples used to estimate the quantiles for\n computational efficiency. Note that the subsampling procedure may\n differ for value-identical sparse and dense matrices.\n\n random_state : int, RandomState instance or None, optional (default=None)\n Determines random number generation for subsampling and smoothing\n noise.\n Please see ``subsample`` for more details.\n Pass an int for reproducible results across multiple function calls.\n See :term:`Glossary <random_state>`\n\n copy : boolean, optional, (default=True)\n Set to False to perform inplace transformation and avoid a copy (if the\n input is already a numpy array). 
If True, a copy of `X` is transformed,\n leaving the original `X` unchanged\n\n ..versionchnanged:: 0.23\n The default value of `copy` changed from False to True in 0.23.\n\n Returns\n -------\n Xt : ndarray or sparse matrix, shape (n_samples, n_features)\n The transformed data.\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn.preprocessing import quantile_transform\n >>> rng = np.random.RandomState(0)\n >>> X = np.sort(rng.normal(loc=0.5, scale=0.25, size=(25, 1)), axis=0)\n >>> quantile_transform(X, n_quantiles=10, random_state=0, copy=True)\n array([...])\n\n See also\n --------\n QuantileTransformer : Performs quantile-based scaling using the\n ``Transformer`` API (e.g. as part of a preprocessing\n :class:`sklearn.pipeline.Pipeline`).\n power_transform : Maps data to a normal distribution using a\n power transformation.\n scale : Performs standardization that is faster, but less robust\n to outliers.\n robust_scale : Performs robust standardization that removes the influence\n of outliers but does not put outliers and inliers on the same scale.\n\n Notes\n -----\n NaNs are treated as missing values: disregarded in fit, and maintained in\n transform.\n\n For a comparison of the different scalers, transformers, and normalizers,\n see :ref:`examples/preprocessing/plot_all_scaling.py\n <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.\n \"\"\"\n n = QuantileTransformer(n_quantiles=n_quantiles,\n output_distribution=output_distribution,\n subsample=subsample,\n ignore_implicit_zeros=ignore_implicit_zeros,\n random_state=random_state,\n copy=copy)\n if axis == 0:\n return n.fit_transform(X)\n elif axis == 1:\n return n.fit_transform(X.T).T\n else:\n raise ValueError(\"axis should be either equal to 0 or 1. Got\"\n \" axis={}\".format(axis))\n\n\nclass PowerTransformer(TransformerMixin, BaseEstimator):\n \"\"\"Apply a power transform featurewise to make data more Gaussian-like.\n\n Power transforms are a family of parametric, monotonic transformations\n that are applied to make data more Gaussian-like. This is useful for\n modeling issues related to heteroscedasticity (non-constant variance),\n or other situations where normality is desired.\n\n Currently, PowerTransformer supports the Box-Cox transform and the\n Yeo-Johnson transform. The optimal parameter for stabilizing variance and\n minimizing skewness is estimated through maximum likelihood.\n\n Box-Cox requires input data to be strictly positive, while Yeo-Johnson\n supports both positive or negative data.\n\n By default, zero-mean, unit-variance normalization is applied to the\n transformed data.\n\n Read more in the :ref:`User Guide <preprocessing_transformer>`.\n\n .. versionadded:: 0.20\n\n Parameters\n ----------\n method : str, (default='yeo-johnson')\n The power transform method. 
Available methods are:\n\n - 'yeo-johnson' [1]_, works with positive and negative values\n - 'box-cox' [2]_, only works with strictly positive values\n\n standardize : boolean, default=True\n Set to True to apply zero-mean, unit-variance normalization to the\n transformed output.\n\n copy : boolean, optional, default=True\n Set to False to perform inplace computation during transformation.\n\n Attributes\n ----------\n lambdas_ : array of float, shape (n_features,)\n The parameters of the power transformation for the selected features.\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn.preprocessing import PowerTransformer\n >>> pt = PowerTransformer()\n >>> data = [[1, 2], [3, 2], [4, 5]]\n >>> print(pt.fit(data))\n PowerTransformer()\n >>> print(pt.lambdas_)\n [ 1.386... -3.100...]\n >>> print(pt.transform(data))\n [[-1.316... -0.707...]\n [ 0.209... -0.707...]\n [ 1.106... 1.414...]]\n\n See also\n --------\n power_transform : Equivalent function without the estimator API.\n\n QuantileTransformer : Maps data to a standard normal distribution with\n the parameter `output_distribution='normal'`.\n\n Notes\n -----\n NaNs are treated as missing values: disregarded in ``fit``, and maintained\n in ``transform``.\n\n For a comparison of the different scalers, transformers, and normalizers,\n see :ref:`examples/preprocessing/plot_all_scaling.py\n <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.\n\n References\n ----------\n\n .. [1] I.K. Yeo and R.A. Johnson, \"A new family of power transformations to\n improve normality or symmetry.\" Biometrika, 87(4), pp.954-959,\n (2000).\n\n .. [2] G.E.P. Box and D.R. Cox, \"An Analysis of Transformations\", Journal\n of the Royal Statistical Society B, 26, 211-252 (1964).\n\n \"\"\"\n @_deprecate_positional_args\n def __init__(self, method='yeo-johnson', *, standardize=True, copy=True):\n self.method = method\n self.standardize = standardize\n self.copy = copy\n\n def fit(self, X, y=None):\n \"\"\"Estimate the optimal parameter lambda for each feature.\n\n The optimal lambda parameter for minimizing skewness is estimated on\n each feature independently using maximum likelihood.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n The data used to estimate the optimal transformation parameters.\n\n y : Ignored\n\n Returns\n -------\n self : object\n \"\"\"\n self._fit(X, y=y, force_transform=False)\n return self\n\n def fit_transform(self, X, y=None):\n return self._fit(X, y, force_transform=True)\n\n def _fit(self, X, y=None, force_transform=False):\n X = self._check_input(X, in_fit=True, check_positive=True,\n check_method=True)\n\n if not self.copy and not force_transform: # if call from fit()\n X = X.copy() # force copy so that fit does not change X inplace\n\n optim_function = {'box-cox': self._box_cox_optimize,\n 'yeo-johnson': self._yeo_johnson_optimize\n }[self.method]\n with np.errstate(invalid='ignore'): # hide NaN warnings\n self.lambdas_ = np.array([optim_function(col) for col in X.T])\n\n if self.standardize or force_transform:\n transform_function = {'box-cox': boxcox,\n 'yeo-johnson': self._yeo_johnson_transform\n }[self.method]\n for i, lmbda in enumerate(self.lambdas_):\n with np.errstate(invalid='ignore'): # hide NaN warnings\n X[:, i] = transform_function(X[:, i], lmbda)\n\n if self.standardize:\n self._scaler = StandardScaler(copy=False)\n if force_transform:\n X = self._scaler.fit_transform(X)\n else:\n self._scaler.fit(X)\n\n return X\n\n def transform(self, X):\n \"\"\"Apply the 
power transform to each feature using the fitted lambdas.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n The data to be transformed using a power transformation.\n\n Returns\n -------\n X_trans : array-like, shape (n_samples, n_features)\n The transformed data.\n \"\"\"\n check_is_fitted(self)\n X = self._check_input(X, in_fit=False, check_positive=True,\n check_shape=True)\n\n transform_function = {'box-cox': boxcox,\n 'yeo-johnson': self._yeo_johnson_transform\n }[self.method]\n for i, lmbda in enumerate(self.lambdas_):\n with np.errstate(invalid='ignore'): # hide NaN warnings\n X[:, i] = transform_function(X[:, i], lmbda)\n\n if self.standardize:\n X = self._scaler.transform(X)\n\n return X\n\n def inverse_transform(self, X):\n \"\"\"Apply the inverse power transformation using the fitted lambdas.\n\n The inverse of the Box-Cox transformation is given by::\n\n if lambda_ == 0:\n X = exp(X_trans)\n else:\n X = (X_trans * lambda_ + 1) ** (1 / lambda_)\n\n The inverse of the Yeo-Johnson transformation is given by::\n\n if X >= 0 and lambda_ == 0:\n X = exp(X_trans) - 1\n elif X >= 0 and lambda_ != 0:\n X = (X_trans * lambda_ + 1) ** (1 / lambda_) - 1\n elif X < 0 and lambda_ != 2:\n X = 1 - (-(2 - lambda_) * X_trans + 1) ** (1 / (2 - lambda_))\n elif X < 0 and lambda_ == 2:\n X = 1 - exp(-X_trans)\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n The transformed data.\n\n Returns\n -------\n X : array-like, shape (n_samples, n_features)\n The original data\n \"\"\"\n check_is_fitted(self)\n X = self._check_input(X, in_fit=False, check_shape=True)\n\n if self.standardize:\n X = self._scaler.inverse_transform(X)\n\n inv_fun = {'box-cox': self._box_cox_inverse_tranform,\n 'yeo-johnson': self._yeo_johnson_inverse_transform\n }[self.method]\n for i, lmbda in enumerate(self.lambdas_):\n with np.errstate(invalid='ignore'): # hide NaN warnings\n X[:, i] = inv_fun(X[:, i], lmbda)\n\n return X\n\n def _box_cox_inverse_tranform(self, x, lmbda):\n \"\"\"Return inverse-transformed input x following Box-Cox inverse\n transform with parameter lambda.\n \"\"\"\n if lmbda == 0:\n x_inv = np.exp(x)\n else:\n x_inv = (x * lmbda + 1) ** (1 / lmbda)\n\n return x_inv\n\n def _yeo_johnson_inverse_transform(self, x, lmbda):\n \"\"\"Return inverse-transformed input x following Yeo-Johnson inverse\n transform with parameter lambda.\n \"\"\"\n x_inv = np.zeros_like(x)\n pos = x >= 0\n\n # when x >= 0\n if abs(lmbda) < np.spacing(1.):\n x_inv[pos] = np.exp(x[pos]) - 1\n else: # lmbda != 0\n x_inv[pos] = np.power(x[pos] * lmbda + 1, 1 / lmbda) - 1\n\n # when x < 0\n if abs(lmbda - 2) > np.spacing(1.):\n x_inv[~pos] = 1 - np.power(-(2 - lmbda) * x[~pos] + 1,\n 1 / (2 - lmbda))\n else: # lmbda == 2\n x_inv[~pos] = 1 - np.exp(-x[~pos])\n\n return x_inv\n\n def _yeo_johnson_transform(self, x, lmbda):\n \"\"\"Return transformed input x following Yeo-Johnson transform with\n parameter lambda.\n \"\"\"\n\n out = np.zeros_like(x)\n pos = x >= 0 # binary mask\n\n # when x >= 0\n if abs(lmbda) < np.spacing(1.):\n out[pos] = np.log1p(x[pos])\n else: # lmbda != 0\n out[pos] = (np.power(x[pos] + 1, lmbda) - 1) / lmbda\n\n # when x < 0\n if abs(lmbda - 2) > np.spacing(1.):\n out[~pos] = -(np.power(-x[~pos] + 1, 2 - lmbda) - 1) / (2 - lmbda)\n else: # lmbda == 2\n out[~pos] = -np.log1p(-x[~pos])\n\n return out\n\n def _box_cox_optimize(self, x):\n \"\"\"Find and return optimal lambda parameter of the Box-Cox transform by\n MLE, for observed data x.\n\n We here use scipy 
builtins which uses the brent optimizer.\n \"\"\"\n # the computation of lambda is influenced by NaNs so we need to\n # get rid of them\n _, lmbda = stats.boxcox(x[~np.isnan(x)], lmbda=None)\n\n return lmbda\n\n def _yeo_johnson_optimize(self, x):\n \"\"\"Find and return optimal lambda parameter of the Yeo-Johnson\n transform by MLE, for observed data x.\n\n Like for Box-Cox, MLE is done via the brent optimizer.\n \"\"\"\n\n def _neg_log_likelihood(lmbda):\n \"\"\"Return the negative log likelihood of the observed data x as a\n function of lambda.\"\"\"\n x_trans = self._yeo_johnson_transform(x, lmbda)\n n_samples = x.shape[0]\n\n loglike = -n_samples / 2 * np.log(x_trans.var())\n loglike += (lmbda - 1) * (np.sign(x) * np.log1p(np.abs(x))).sum()\n\n return -loglike\n\n # the computation of lambda is influenced by NaNs so we need to\n # get rid of them\n x = x[~np.isnan(x)]\n # choosing bracket -2, 2 like for boxcox\n return optimize.brent(_neg_log_likelihood, brack=(-2, 2))\n\n def _check_input(self, X, in_fit, check_positive=False, check_shape=False,\n check_method=False):\n \"\"\"Validate the input before fit and transform.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n\n check_positive : bool\n If True, check that all data is positive and non-zero (only if\n ``self.method=='box-cox'``).\n\n check_shape : bool\n If True, check that n_features matches the length of self.lambdas_\n\n check_method : bool\n If True, check that the transformation method is valid.\n \"\"\"\n X = self._validate_data(X, ensure_2d=True, dtype=FLOAT_DTYPES,\n copy=self.copy, force_all_finite='allow-nan')\n\n with np.warnings.catch_warnings():\n np.warnings.filterwarnings(\n 'ignore', r'All-NaN (slice|axis) encountered')\n if (check_positive and self.method == 'box-cox' and\n np.nanmin(X) <= 0):\n raise ValueError(\"The Box-Cox transformation can only be \"\n \"applied to strictly positive data\")\n\n if check_shape and not X.shape[1] == len(self.lambdas_):\n raise ValueError(\"Input data has a different number of features \"\n \"than fitting data. Should have {n}, data has {m}\"\n .format(n=len(self.lambdas_), m=X.shape[1]))\n\n valid_methods = ('box-cox', 'yeo-johnson')\n if check_method and self.method not in valid_methods:\n raise ValueError(\"'method' must be one of {}, \"\n \"got {} instead.\"\n .format(valid_methods, self.method))\n\n return X\n\n def _more_tags(self):\n return {'allow_nan': True}\n\n\n@_deprecate_positional_args\ndef power_transform(X, method='yeo-johnson', *, standardize=True, copy=True):\n \"\"\"\n Power transforms are a family of parametric, monotonic transformations\n that are applied to make data more Gaussian-like. This is useful for\n modeling issues related to heteroscedasticity (non-constant variance),\n or other situations where normality is desired.\n\n Currently, power_transform supports the Box-Cox transform and the\n Yeo-Johnson transform. 
The optimal parameter for stabilizing variance and\n minimizing skewness is estimated through maximum likelihood.\n\n Box-Cox requires input data to be strictly positive, while Yeo-Johnson\n supports both positive or negative data.\n\n By default, zero-mean, unit-variance normalization is applied to the\n transformed data.\n\n Read more in the :ref:`User Guide <preprocessing_transformer>`.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n The data to be transformed using a power transformation.\n\n method : {'yeo-johnson', 'box-cox'}, default='yeo-johnson'\n The power transform method. Available methods are:\n\n - 'yeo-johnson' [1]_, works with positive and negative values\n - 'box-cox' [2]_, only works with strictly positive values\n\n .. versionchanged:: 0.23\n The default value of the `method` parameter changed from\n 'box-cox' to 'yeo-johnson' in 0.23.\n\n standardize : boolean, default=True\n Set to True to apply zero-mean, unit-variance normalization to the\n transformed output.\n\n copy : boolean, optional, default=True\n Set to False to perform inplace computation during transformation.\n\n Returns\n -------\n X_trans : array-like, shape (n_samples, n_features)\n The transformed data.\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn.preprocessing import power_transform\n >>> data = [[1, 2], [3, 2], [4, 5]]\n >>> print(power_transform(data, method='box-cox'))\n [[-1.332... -0.707...]\n [ 0.256... -0.707...]\n [ 1.076... 1.414...]]\n\n See also\n --------\n PowerTransformer : Equivalent transformation with the\n ``Transformer`` API (e.g. as part of a preprocessing\n :class:`sklearn.pipeline.Pipeline`).\n\n quantile_transform : Maps data to a standard normal distribution with\n the parameter `output_distribution='normal'`.\n\n Notes\n -----\n NaNs are treated as missing values: disregarded in ``fit``, and maintained\n in ``transform``.\n\n For a comparison of the different scalers, transformers, and normalizers,\n see :ref:`examples/preprocessing/plot_all_scaling.py\n <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.\n\n References\n ----------\n\n .. [1] I.K. Yeo and R.A. Johnson, \"A new family of power transformations to\n improve normality or symmetry.\" Biometrika, 87(4), pp.954-959,\n (2000).\n\n .. [2] G.E.P. Box and D.R. Cox, \"An Analysis of Transformations\", Journal\n of the Royal Statistical Society B, 26, 211-252 (1964).\n \"\"\"\n pt = PowerTransformer(method=method, standardize=standardize, copy=copy)\n return pt.fit_transform(X)\n"
] |
[
[
"sklearn.datasets.make_classification",
"numpy.allclose",
"sklearn.model_selection.train_test_split",
"numpy.ones",
"sklearn.datasets.make_regression",
"numpy.zeros"
],
[
"scipy.stats.norm.ppf",
"scipy.optimize.brent",
"scipy.stats.norm.cdf"
]
] |
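The PowerTransformer code embedded above applies the Yeo-Johnson mapping piecewise for a fixed lambda (see `_yeo_johnson_transform`). A minimal standalone sketch of that same mapping, assuming plain NumPy rather than the accelerated array types the excerpt handles:

    import numpy as np

    def yeo_johnson(x, lmbda):
        """Apply the Yeo-Johnson transform element-wise for a fixed lambda."""
        x = np.asarray(x, dtype=float)
        out = np.zeros_like(x)
        pos = x >= 0
        # branch for x >= 0
        if abs(lmbda) < np.spacing(1.0):
            out[pos] = np.log1p(x[pos])
        else:
            out[pos] = (np.power(x[pos] + 1, lmbda) - 1) / lmbda
        # branch for x < 0
        if abs(lmbda - 2) > np.spacing(1.0):
            out[~pos] = -(np.power(-x[~pos] + 1, 2 - lmbda) - 1) / (2 - lmbda)
        else:
            out[~pos] = -np.log1p(-x[~pos])
        return out

    # lmbda=1 reduces to the identity on both branches, a handy sanity check.
    print(yeo_johnson([-1.5, 0.0, 2.0], lmbda=1.0))
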
pawlodkowski/Movie_Recommender
|
[
"2294081ec439b43feb2596835aa3508c7a3e4d28"
] |
[
"MovieRec/mvr/core.py"
] |
[
"###############################################################################\n''' \nSpicy Movie Recommender 5000\n\n\n# todo\n# apply superior filtering algorithm\n# magic merger returns duplicates sometimes...\n# prepare data for sending it to the website --> convert to IMDB Ids\n'''\n############################################################################### Imports\n\nimport sqlite3\nimport pickle\nimport pandas as pd\nimport numpy as np\nfrom more_itertools import unique_everseen\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport os\n\n############################################################################### Functions\n\ndef get_data_from_db(database_directory, tablename):\n db = sqlite3.connect(database_directory)\n query = f\"SELECT * FROM {tablename}\"\n df = pd.read_sql(query, db)\n db.close()\n return df\n\n\ndef get_all_movie_ids(database_directory, tablename = \"mvr_ratings\"):\n d0 = get_data_from_db(database_directory, tablename)\n all_movie_ids = sorted(set(d0[\"movieId\"]))\n return all_movie_ids\n\n\ndef create_users_vs_movies_matrix(df):\n df = df.drop(\"timestamp\", axis = 1)\n df = df.drop(\"id\", axis = 1)\n df = df.set_index([\"userId\", \"movieId\"])\n df = df.unstack()\n df = df.fillna(0)\n return df\n\n\ndef load_NMF_model(NMF_model_directory):\n loaded_model = pickle.load(open(NMF_model_directory, \"rb\"))\n return loaded_model\n\n\ndef apply_NMF(trained_model, converted_user_input, filtered_movie_ids, all_movie_ids):\n\n # preparing P and Q\n P = trained_model.components_\n user_Q = trained_model.transform([converted_user_input])\n\n # creating new R, = list of NMF_scores for particular user\n user_R = np.dot(user_Q, P)\n NMF_scores_user = list(user_R[0])\n\n # labelling NMF_scores with movie ids\n recommendations_with_movie_id = dict(zip(all_movie_ids, NMF_scores_user))\n\n # applying filters\n recommendations_with_movie_id_filtered = list([[x,recommendations_with_movie_id[x]] for x in filtered_movie_ids])\n results = pd.DataFrame(recommendations_with_movie_id_filtered, columns = [\"movieId\", \"NMF_score\"])\n\n results = results.sort_values(by = \"NMF_score\", ascending = False)\n output = list(results[\"movieId\"])\n return output\n\n\ndef apply_CF(converted_user_input, all_movie_ids, users_vs_movies_matrix, filtered_movie_ids):\n\n def make_cosine_heatmap(df):\n labels = list(df.index.values)\n cosine_similarities = cosine_similarity(df)\n results_df = pd.DataFrame(cosine_similarities, columns = labels, index = labels)\n return results_df\n\n def get_similar_users(cosine_heatmap, userId):\n results_for_one_user = cosine_heatmap.loc[userId].sort_values(ascending = False) # get similar users\n similar_users = list(results_for_one_user.keys())[1:]\n return similar_users\n\n #create user row in dataframe\n #user_input_df_row = pd.DataFrame(converted_user_input, index = all_movie_ids).transpose()\n\n # add that user row into existing df\n users_vs_movies_matrix_complemented = users_vs_movies_matrix.copy()\n users_vs_movies_matrix_complemented.loc[0] = converted_user_input#user_input_df_row\n\n # determine ordered list of similar users via cosine similarity\n cosine_heatmap = make_cosine_heatmap(users_vs_movies_matrix_complemented)\n similar_users = get_similar_users(cosine_heatmap, 0)\n\n # apply filters\n filtered_uvmm_complemented = users_vs_movies_matrix_complemented[\"rating\"][filtered_movie_ids]\n\n # go through every similar user, starting from most similar one, check for conditions below and append movieId to list\n 
recommended_movie_ids = []\n for user in similar_users:\n checking_movies = ((filtered_uvmm_complemented.loc[0] == 0.0) & (filtered_uvmm_complemented.loc[user] == 5.0))\n d = dict(checking_movies)\n recommended_movie_ids_from_user = list(filter(d.get, d)) #returning keys (movieids) from dict where value is True\n recommended_movie_ids += recommended_movie_ids_from_user\n recommended_movie_ids = list(unique_everseen(recommended_movie_ids)) # delete duplicates from list keeping their order\n if len(recommended_movie_ids) >= 30:\n break\n return recommended_movie_ids\n\ndef apply_filtering(website_filters, database_directory):\n \n def splitter1(x):\n return \",\".join(x.split(\"|\")).lower()\n def splitter2(x):\n return \",\".join(x.split(\",\")).lower()\n def splitter3(x):\n return x.split(\",\")\n \n # make searchable extract: keywords vs movieIds\n # load data\n d0 = get_data_from_db(database_directory, \"mvr_ratings\")###################new\n all_rated_movie_ids = sorted(set(d0[\"movieId\"]))###########################new\n d0r = pd.DataFrame(all_rated_movie_ids, columns = [\"movieId\"])#############new \n \n dm = get_data_from_db(database_directory, \"mvr_movielens\")\n dt = get_data_from_db(database_directory, \"mvr_tags\")\n # make genres to a string\n dm[\"genres\"] = dm[\"genres\"].apply(splitter1)\n # group tags per movieid and make string out of it\n dt = dt.groupby([\"movieId\"])['tag'].apply(lambda x: ','.join(x)).reset_index()\n dt[\"tag\"] = dt[\"tag\"].apply(splitter2)\n # merge combined tags on dm\n df1 = pd.merge(d0r, dm, how=\"left\", on = \"movieId\")########################new\n df = pd.merge(df1, dt, how = \"left\", on = \"movieId\")#######################new \n\n df[\"tag\"].fillna(value = \"\", inplace = True)\n # keyword column\n df[\"keywords\"] = df[\"genres\"] + df[\"tag\"] + df[\"title\"]\n # make extact for searching\n keywords = list(df[\"keywords\"])\n movieIds = list(df[\"movieId\"])\n keywords_and_movieids = list(zip(keywords, movieIds))\n \n #get results\n filters = website_filters.lower().replace(\",\", \" \").split(\" \")\n \n raw = website_filters.lower().replace(\",\", \" \").strip().split(\" \")\n while '' in raw:\n raw.remove('')\n \n filtered_ids = []\n for i, movie in enumerate(keywords_and_movieids):\n found = []\n for keyword in filters:\n if keyword.lower() in keywords_and_movieids[i][0]:\n found.append(True)\n else:\n found.append(False)\n if all(found):\n filtered_ids.append(keywords_and_movieids[i][1])\n else:\n continue\n filtered_ids = sorted(list(set(filtered_ids)))\n \n return filtered_ids\n \n \n\ndef movieIds_by_genre(desired_genre, database_directory):\n\n db = sqlite3.connect(database_directory)\n query = '''SELECT title, genres, mvr_ratings.*, mvr_tags.tag, mvr_tags.timestamp AS ts\n FROM mvr_movielens\n JOIN mvr_ratings ON mvr_movielens.movieId = mvr_ratings.movieId\n LEFT JOIN mvr_tags ON mvr_movielens.movieID = mvr_tags.movieID AND mvr_ratings.userId = mvr_tags.userId'''\n \n dataframe = pd.read_sql(query, db)\n db.close()\n \n \n genres = list(dataframe['genres'].unique())\n\n genres_split = []\n for g in genres:\n sublist = g.split('|')\n genres_split.append(sublist)\n\n flat_list = [item for sublist in genres_split for item in sublist]\n\n def unique_list(list):\n a = []\n for b in list:\n if b not in a:\n a.append(b)\n return a\n\n unique_genres = unique_list(flat_list)\n\n for g in unique_genres:\n\n col_to_add = []\n for i in list(dataframe['genres']):\n if g in i:\n col_to_add.append(1)\n else:\n col_to_add.append(0)\n\n 
dataframe['Genre_{}'.format(g)] = col_to_add\n\n# del dataframe['genres']\n# #optional\n\n ids = dataframe['movieId']\n bools = dataframe['Genre_{}'.format(desired_genre)].values\n z = list(zip(ids, bools))\n list_movies = []\n for pair in z:\n if pair[1] == 1:\n list_movies.append(pair[0])\n\n return unique_list(list_movies)\n\n\ndef translator_dictionary(database_directory):\n\n db = sqlite3.connect(database_directory)\n query = \"SELECT movieId, imdbId FROM mvr_links\"\n df_translator = pd.read_sql(query, db)\n movie_IDs = list(df_translator['movieId'])\n IMDB_IDs = list(df_translator['imdbId'])\n\n #ML_2_IMDB = dict(zip(movie_IDs, IMDB_IDs))\n IMDB_2_ML = dict(zip(IMDB_IDs, movie_IDs))\n db.close()\n\n return IMDB_2_ML\n\n\ndef convert_django(dataframe, django_data, database_directory):\n\n new_user_row = pd.DataFrame(np.zeros(shape=(1,len(dataframe.columns))),\n columns=dataframe.columns)\n\n dfs_to_concat = [dataframe, new_user_row]\n combined = pd.concat(dfs_to_concat)\n\n translator = translator_dictionary(database_directory)\n\n #Assuming that data coming from Django interface is a list of tuples\n for pair in django_data:\n movieId = translator[pair[0]]\n combined.loc[0,(\"rating\", movieId)] = pair[1]\n #where 0 represents userId = 0, aka the new user\n\n return list(combined.loc[0])\n\n\ndef convert_ids_to_titles(id_list,database_directory):\n \n db = sqlite3.connect(database_directory)\n query = \"SELECT movieId, title FROM mvr_movielens\"\n df_translator = pd.read_sql(query, db)\n movie_IDs = list(df_translator['movieId'])\n titles = list(df_translator['title'])\n\n id_2_title = dict(zip(movie_IDs, titles))\n\n titles = []\n for i in id_list:\n titles.append(id_2_title[i])\n \n db.close()\n return titles\n\ndef back_2_IMDB(id_list, database_directory):\n \n db = sqlite3.connect(database_directory)\n query = \"SELECT movieId, imdbId FROM mvr_links\"\n df_translator = pd.read_sql(query, db)\n movie_IDs = list(df_translator['movieId'])\n IMDB_IDs = list(df_translator['imdbId'])\n \n ML_2_IMDB = dict(zip(movie_IDs, IMDB_IDs))\n \n converted_ids = []\n for i in id_list:\n converted_ids.append(ML_2_IMDB[i])\n \n db.close()\n \n return converted_ids\n\ndef magic_merging(NMF, CF):\n # create function that merges the results of NMF and collaborative filtering\n if len(CF) > 20:\n NMF_reduced = NMF[:len(CF)]\n overlap = [x for i,x in enumerate(NMF_reduced) if NMF_reduced[i] in CF]\n if len(overlap) < 10:\n result = NMF[:5]+CF[:5]\n else:\n result = overlap[:10]\n else:\n result = NMF[:10]\n return result\n\n\n#def convert_to_imdbid(movieids):\n # converts list of movieids into list of imdb ids\n# return imdbids\n\n\n#def make_poster_links(imdbids):\n # return a list of posterlinks according to the imdbids\n# return poster_links\n\n\n\n############################################################################### main function\n\ndef recommender(website_user_ratings, website_filters):\n\n PFAD = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n \n #database_directory = \"data/movies.db\"\n #database_directory = \"../movies.sqlite3\"\n database_directory = PFAD + \"\\movies.sqlite3\"\n #NMF_model_directory = \"data/NMF_model_trained.sav\"\n NMF_model_directory = PFAD + \"\\\\NMF_model_trained.sav\"\n\n d0 = get_data_from_db(database_directory, \"mvr_ratings\")\n users_vs_movies_matrix = create_users_vs_movies_matrix(d0)\n\n converted_user_input = convert_django(users_vs_movies_matrix, website_user_ratings, database_directory)\n #filtered_movie_ids = 
movieIds_by_genre(website_filters, database_directory)\n filtered_movie_ids = apply_filtering(website_filters, database_directory)\n\n all_movie_ids = get_all_movie_ids(database_directory, tablename = \"mvr_ratings\")\n trained_model = load_NMF_model(NMF_model_directory)\n \n if len(filtered_movie_ids) == 0:\n filtered_movie_ids = all_movie_ids\n \n #filtered_movie_ids = all_movie_ids\n\n NMF_results = apply_NMF(trained_model, converted_user_input, filtered_movie_ids, all_movie_ids)\n #CF_results = apply_CF(converted_user_input, all_movie_ids, users_vs_movies_matrix, filtered_movie_ids)\n\n #magic_recoms = magic_merging(NMF_results, CF_results)\n magic_recoms = NMF_results\n \n recommended_movie_titles = back_2_IMDB(magic_recoms, database_directory)\n # map to imbdid\n # make poster links\n\n return recommended_movie_titles[:10]\n #return len(converted_user_input)\n\n#PFAD = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nwebsite_user_ratings = [('0092991', 5.0)]\nwebsite_filters = \"horror\"\n#settings.configure()\n#print(PFAD + \"\\\\NMF_model_trained.sav\")\nprint(recommender(website_user_ratings, website_filters))\n#test = (recommender(website_user_ratings, website_filters))\n#test"
] |
[
[
"numpy.dot",
"pandas.merge",
"pandas.concat",
"sklearn.metrics.pairwise.cosine_similarity",
"pandas.DataFrame",
"pandas.read_sql"
]
] |
pekzeki/stlthmd_task
|
[
"0824d17a727f4a09a4ba2c7f909b12d0880cd289"
] |
[
"src/features/build_features.py"
] |
[
"from nltk.tokenize import RegexpTokenizer\nfrom sklearn.model_selection import train_test_split\nfrom collections import Counter\nfrom nltk.stem import PorterStemmer, WordNetLemmatizer, SnowballStemmer\nfrom sklearn.preprocessing import LabelEncoder\nimport re\nimport string\nfrom sklearn.preprocessing import LabelEncoder\n\n\n\ndef line_word_length(line):\n line.translate(None, string.punctuation)\n words = line.split()\n # words = re.split(\"[\\p{Punct}\\s]+\", line)\n\n return len(words)\n\ndef has_entity(line, entity):\n if entity in line:\n return 1\n else:\n return 0\n\n\ndef extra_features(df):\n\n df['has_citation'] = df['sentence'].apply(lambda row: has_entity(row, 'CITATION'))\n df['has_symbol'] = df['sentence'].apply(lambda row: has_entity(row, 'SYMBOL'))\n df['has_number'] = df['sentence'].apply(lambda row: has_entity(row, 'NUMBER'))\n df['word_length'] = df['sentence'].apply(line_word_length)\n\n lb_make = LabelEncoder()\n df[\"domain\"] = lb_make.fit_transform(df[\"domain\"])\n df[\"section\"] = lb_make.fit_transform(df[\"section\"])\n\n return df\n\n\ndef calculate_label(a1, a2, a3):\n count = Counter([a1, a2, a3])\n if len(count) == 3:\n label = a3\n elif len(count) == 2 and a1 == a2:\n label = a2\n else:\n label = a3\n\n return label\n\n\ndef set_final_label(df):\n df['final_label'] = df.apply(\n lambda row: calculate_label(row['annotator_1'], row['annotator_2'], row['annotator_3']), axis=1)\n df = df.drop(columns=[\"annotator_1\", \"annotator_2\", \"annotator_3\"])\n # label_conversion = {\"final_label\": {\"AIMX\": 1, \"OWNX\": 2, \"CONT\": 3, \"BASE\": 4, \"MISC\": 5}}\n # df.replace(label_conversion, inplace=True)\n\n return df\n\n\ndef split_sentence_data(df):\n\n X = df.iloc[:, 4].values\n y = df.iloc[:, 5].values\n\n return train_test_split(X, y, test_size=0.25, random_state=42)\n\n\ndef split_data(df):\n\n X = df[['sentence', 'domain', 'section', 'line', 'word_length', 'has_citation', 'has_symbol', 'has_number']].values\n y = df['final_label'].values\n\n return train_test_split(X, y, test_size=0.25, random_state=42)\n\n\ndef get_stop_words(stopwords_file):\n \"\"\"\n Reads the provided stopwords file\n\n :param stopwords_file:\n :return:\n \"\"\"\n\n stopwords = []\n with open(stopwords_file) as input_data:\n for line in input_data:\n stopwords.append(line.strip())\n return stopwords\n\n\ndef process_text(text, stopwords_file=None, stemming=False, lemmetization=False):\n\n text = ''.join([i for i in text if not i.isdigit()])\n filtered_words = []\n\n entity_list = ['CITATION', 'NUMBER', 'SYMBOL']\n #stemmer = SnowballStemmer(\"english\")\n stemmer = PorterStemmer()\n lemmatiser = WordNetLemmatizer()\n\n\n if stopwords_file is not None:\n\n stopwords = get_stop_words(stopwords_file)\n tokenizer = RegexpTokenizer(r'\\w+')\n tokens = tokenizer.tokenize(text)\n\n for w in tokens:\n\n # Do not touch entities\n if any(entity in w for entity in entity_list):\n w = w.upper()\n else:\n w = w.lower()\n\n if not w in stopwords:\n if stemming:\n w = stemmer.stem(w)\n if lemmetization:\n w = lemmatiser.lemmatize(w)\n filtered_words.append(w)\n\n\n return \" \".join(filtered_words)\n\n else:\n return text\n\n\n\n"
] |
[
[
"sklearn.preprocessing.LabelEncoder",
"sklearn.model_selection.train_test_split"
]
] |
marionleborgne/EIT_Dashboard
|
[
"057e05d20325fa6a9b9b0183f63a73c8b7e6cd3b"
] |
[
"sandbox/sand/OpenEIT/reconstruction/pyeit/eit/fem.py"
] |
[
"# coding: utf-8\n# pylint: disable=invalid-name, no-member, too-many-locals\n# pylint: disable=too-many-instance-attributes\n\"\"\" 2D/3D FEM routines \"\"\"\n# Copyright (c) Benyuan Liu. All Rights Reserved.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\nfrom __future__ import division, absolute_import, print_function\n\nfrom collections import namedtuple\nimport numpy as np\nimport numpy.linalg as la\nfrom scipy import sparse\n\nfrom .utils import eit_scan_lines\n\n\nclass Forward(object):\n \"\"\" FEM forward computing code \"\"\"\n\n def __init__(self, mesh, el_pos):\n \"\"\"\n A good FEM forward solver should only depend on\n mesh structure and the position of electrodes\n NOTE: the nodes are assumed continuous numbered.\n\n Parameters\n ----------\n mesh : dict\n mesh structure\n el_pos : NDArray\n numbering of electrodes positions\n \"\"\"\n self.pts = mesh['node']\n self.tri = mesh['element']\n self.tri_perm = mesh['perm']\n self.el_pos = el_pos\n\n # reference electrodes [ref node should not be on electrodes]\n ref_el = 0\n while ref_el in self.el_pos:\n ref_el = ref_el + 1\n self.ref = ref_el\n\n # infer dimensions from mesh\n self.n_pts, self.n_dim = self.pts.shape\n self.n_tri, self.n_vertices = self.tri.shape\n self.ne = el_pos.size\n\n def solve_eit(self, ex_mat=None, step=1, perm=None, parser=None):\n \"\"\"\n EIT simulation, generate perturbation matrix and forward v\n\n Parameters\n ----------\n ex_mat : NDArray\n numLines x n_el array, stimulation matrix\n step : int\n the configuration of measurement electrodes (default: adjacent)\n perm : NDArray\n Mx1 array, initial x0. must be the same size with self.tri_perm\n parser : str\n if parser is 'fmmu', within each stimulation pattern, diff_pairs\n or boundary measurements are re-indexed and started\n from the positive stimulus electrode\n if parser is 'std', subtract_row start from the 1st electrode\n\n Returns\n -------\n jac : NDArray\n number of measures x n_E complex array, the Jacobian\n v : NDArray\n number of measures x 1 array, simulated boundary measures\n b_matrix : NDArray\n back-projection mappings (smear matrix)\n \"\"\"\n # initialize/extract the scan lines (default: apposition)\n if ex_mat is None:\n ex_mat = eit_scan_lines(16, 8)\n\n # initialize the permittivity on element\n if perm is None:\n perm0 = self.tri_perm\n elif np.isscalar(perm):\n perm0 = np.ones(self.n_tri, dtype=np.float)\n else:\n assert perm.shape == (self.n_tri,)\n perm0 = perm\n\n # calculate f and Jacobian iteratively over all stimulation lines\n jac, v, b_matrix = [], [], []\n n_lines = ex_mat.shape[0]\n\n for i in range(n_lines):\n # FEM solver of one stimulation pattern, a row in ex_mat\n ex_line = ex_mat[i]\n f, jac_i = self.solve(ex_line, perm0)\n f_el = f[self.el_pos]\n\n # boundary measurements, subtract_row-voltages on electrodes\n diff_op = voltage_meter(ex_line, n_el=self.ne,step=step, parser=parser)\n \n # print ('ex_mat')\n # print (ex_line)\n # print (diff_op)\n\n v_diff = subtract_row(f_el, diff_op)\n jac_diff = subtract_row(jac_i, diff_op)\n\n # build bp projection matrix\n # 1. we can either smear at the center of elements, using\n # >> fe = np.mean(f[self.tri], axis=1)\n # 2. 
or, simply smear at the nodes using f\n b = smear(f, f_el, diff_op)\n\n # append\n v.append(v_diff)\n jac.append(jac_diff)\n b_matrix.append(b)\n\n # update output, now you can call p.jac, p.v, p.b_matrix\n pde_result = namedtuple(\"pde_result\", ['jac', 'v', 'b_matrix'])\n p = pde_result(jac=np.vstack(jac),\n v=np.hstack(v),\n b_matrix=np.vstack(b_matrix))\n return p\n\n def solve(self, ex_line, perm):\n \"\"\"\n with one pos (A), neg(B) driven pairs, calculate and\n compute the potential distribution (complex-valued)\n\n Parameters\n ex_line : NDArray\n stimulation (scan) patterns/lines\n perm : NDArray\n permittivity on elements (initial)\n\n Returns\n -------\n f : NDArray\n potential on nodes\n J : NDArray\n Jacobian\n \"\"\"\n # 1. calculate local stiffness matrix (on each element)\n ke = calculate_ke(self.pts, self.tri)\n\n # 2. assemble to global K\n kg = assemble_sparse(ke, self.tri, perm, self.n_pts, ref=self.ref)\n\n # 3. calculate electrode impedance matrix R = K^{-1}\n r_matrix = la.inv(kg)\n r_el = r_matrix[self.el_pos]\n\n # 4. solving nodes potential using boundary conditions\n b = self._natural_boundary(ex_line)\n f = np.dot(r_matrix, b).ravel()\n\n # 5. build Jacobian matrix column wise (element wise)\n # Je = Re*Ke*Ve = (nex3) * (3x3) * (3x1)\n jac = np.zeros((self.ne, self.n_tri), dtype=perm.dtype)\n for (i, e) in enumerate(self.tri):\n jac[:, i] = np.dot(np.dot(r_el[:, e], ke[i]), f[e])\n\n return f, jac\n\n def _natural_boundary(self, ex_line):\n \"\"\"\n Notes\n -----\n Generate the Neumann boundary condition. In utils.py,\n you should note that ex_line is local indexed from 0...15,\n which need to be converted to global node number using el_pos.\n \"\"\"\n drv_a_global = self.el_pos[ex_line[0]]\n drv_b_global = self.el_pos[ex_line[1]]\n\n # global boundary condition\n b = np.zeros((self.n_pts, 1))\n b[drv_a_global] = 1.\n b[drv_b_global] = -1.\n\n return b\n\n\ndef smear(f, fb, pairs):\n \"\"\"\n build smear matrix B for bp\n\n Parameters\n ----------\n f : NDArray\n potential on nodes\n fb : NDArray\n potential on adjacent electrodes\n pairs : NDArray\n electrodes numbering pairs\n\n Returns\n -------\n NDArray\n back-projection matrix\n \"\"\"\n b_matrix = []\n for i, j in pairs:\n f_min, f_max = min(fb[i], fb[j]), max(fb[i], fb[j])\n b_matrix.append((f_min < f) & (f <= f_max))\n\n return np.array(b_matrix)\n\n\ndef subtract_row(v, pairs):\n \"\"\"\n v_diff[k] = v[i, :] - v[j, :]\n\n Parameters\n ----------\n v : NDArray\n Nx1 boundary measurements vector or NxM matrix\n pairs : NDArray\n Nx2 subtract_row pairs\n\n Returns\n -------\n NDArray\n difference measurements\n \"\"\"\n i = pairs[:, 0]\n j = pairs[:, 1]\n # row-wise/element-wise operation on matrix/vector v\n v_diff = v[i] - v[j]\n\n return v_diff\n\n\ndef voltage_meter(ex_line, n_el=16, step=1, parser=None):\n \"\"\"\n extract subtract_row-voltage measurements on boundary electrodes.\n we direct operate on measurements or Jacobian on electrodes,\n so, we can use LOCAL index in this module, do not require el_pos.\n\n Notes\n -----\n A : current driving electrode\n B : current sink\n M, N : boundary electrodes, where v_diff = v_n - v_m\n\n Parameters\n ----------\n ex_line : NDArray\n 2x1 array, 0 for positive electrode, 1 for negative electrode\n n_el : int\n number of electrodes\n step : int\n measurement method (which two electrodes are used for measuring)\n parser : str\n if parser is 'fmmu', data are trimmed, start index (i) is always 'A'.\n\n Returns\n -------\n v : NDArray\n (N-1)*2 arrays of 
subtract_row pairs\n \"\"\"\n # local node\n drv_a = ex_line[0]\n drv_b = ex_line[1]\n i0 = drv_a if parser == 'fmmu' else 0\n\n # build differential pairs\n v = []\n for a in range(i0, i0 + n_el):\n m = a % n_el\n n = (m + step) % n_el\n # if any of the electrodes is the stimulation electrodes\n if not(m == drv_a or m == drv_b or n == drv_a or n == drv_b):\n # the order of m, n matters\n v.append([n, m])\n\n diff_pairs = np.array(v)\n return diff_pairs\n\n\ndef assemble(ke, tri, perm, n_pts, ref=0):\n \"\"\"\n Assemble the stiffness matrix (dense matrix, default)\n\n Parameters\n ----------\n ke : NDArray\n n_tri x (n_dim x n_dim) 3d matrix\n tri : NDArray\n the structure of mesh\n perm : NDArray\n n_tri x 1 conductivities on elements\n n_pts : int\n number of nodes\n ref : int\n reference electrode\n\n Returns\n -------\n NDArray\n k_matrix, NxN array of complex stiffness matrix\n\n Notes\n -----\n you can use sparse matrix (IJV) format to automatically add the local\n stiffness matrix to the global matrix.\n \"\"\"\n n_tri = tri.shape[0]\n\n # assemble global stiffness matrix\n k_global = np.zeros((n_pts, n_pts), dtype=perm.dtype)\n for ei in range(n_tri):\n k_local = ke[ei]\n pe = perm[ei]\n\n no = tri[ei, :]\n ij = np.ix_(no, no)\n k_global[ij] += (k_local * pe)\n\n # place reference electrode\n if 0 <= ref < n_pts:\n k_global[ref, :] = 0.\n k_global[:, ref] = 0.\n k_global[ref, ref] = 1.\n\n return k_global\n\n\ndef assemble_sparse(ke, tri, perm, n_pts, ref=0):\n \"\"\"\n Assemble the stiffness matrix (using sparse matrix)\n\n Parameters\n ----------\n ke : NDArray\n n_tri x (n_dim x n_dim) 3d matrix\n tri : NDArray\n the structure of mesh\n perm : NDArray\n n_tri x 1 conductivities on elements\n n_pts : int\n number of nodes\n ref : int\n reference electrode\n\n Returns\n -------\n NDArray\n k_matrix, NxN array of complex stiffness matrix\n\n Notes\n -----\n you may use sparse matrix (IJV) format to automatically add the local\n stiffness matrix to the global matrix.\n \"\"\"\n n_tri, n_vertices = tri.shape\n\n # New: use IJV indexed sparse matrix to assemble K (fast, prefer)\n # index = np.array([np.meshgrid(no, no, indexing='ij') for no in tri])\n # note: meshgrid is slow, using handcraft sparse index, for example\n # let tri=[[1, 2, 3], [4, 5, 6]], then indexing='ij' is equivalent to\n # row = [1, 1, 1, 2, 2, 2, ...]\n # col = [1, 2, 3, 1, 2, 3, ...]\n row = np.repeat(tri, n_vertices).ravel()\n col = np.repeat(tri, n_vertices, axis=0).ravel()\n data = np.array([ke[i] * perm[i] for i in range(n_tri)]).ravel()\n\n # set reference nodes before constructing sparse matrix, where\n # K[ref, :] = 0, K[:, ref] = 0, K[ref, ref] = 1.\n # write your own mask code to set the corresponding locations of data\n # before building the sparse matrix, for example,\n # data = mask_ref_node(data, row, col, ref)\n\n # for efficient sparse inverse (csc)\n A = sparse.csr_matrix((data, (row, col)),\n shape=(n_pts, n_pts), dtype=perm.dtype)\n\n # the stiffness matrix may not be sparse\n A = A.toarray()\n\n # place reference electrode\n if 0 <= ref < n_pts:\n A[ref, :] = 0.\n A[:, ref] = 0.\n A[ref, ref] = 1.\n\n return A\n\n\ndef calculate_ke(pts, tri):\n \"\"\"\n Calculate local stiffness matrix on all elements.\n\n Parameters\n ----------\n pts : NDArray\n Nx2 (x,y) or Nx3 (x,y,z) coordinates of points\n tri : NDArray\n Mx3 (triangle) or Mx4 (tetrahedron) connectivity of elements\n\n Returns\n -------\n ke_array : NGArray\n n_tri x (n_dim x n_dim) 3d matrix\n \"\"\"\n n_tri, n_vertices = tri.shape\n\n # 
check dimension\n # '3' : triangles\n # '4' : tetrahedrons\n if n_vertices == 3:\n _k_local = _k_triangle\n elif n_vertices == 4:\n _k_local = _k_tetrahedron\n else:\n raise TypeError('The num of vertices of elements must be 3 or 4')\n\n # default data types for ke\n ke_array = np.zeros((n_tri, n_vertices, n_vertices))\n for ei in range(n_tri):\n no = tri[ei, :]\n xy = pts[no]\n\n # compute the KIJ (permittivity=1.)\n ke = _k_local(xy)\n ke_array[ei] = ke\n\n return ke_array\n\n\ndef _k_triangle(xy):\n \"\"\"\n given a point-matrix of an element, solving for Kij analytically\n using barycentric coordinates (simplex coordinates)\n\n Parameters\n ----------\n xy : NDArray\n (x,y) of nodes 1,2,3 given in counterclockwise manner\n\n Returns\n -------\n ke_matrix : NDArray\n local stiffness matrix\n \"\"\"\n # edges (vector) of triangles\n s = xy[[2, 0, 1]] - xy[[1, 2, 0]]\n # s1 = xy[2, :] - xy[1, :]\n # s2 = xy[0, :] - xy[2, :]\n # s3 = xy[1, :] - xy[0, :]\n\n # area of triangles\n # TODO: remove abs, user must make sure all triangles are CCW.\n # at = 0.5 * la.det(s[[0, 1]])\n at = np.abs(0.5 * det2x2(s[0], s[1]))\n\n # (e for element) local stiffness matrix\n ke_matrix = np.dot(s, s.T) / (4. * at)\n\n return ke_matrix\n\n\ndef det2x2(s1, s2):\n \"\"\"Calculate the determinant of a 2x2 matrix\"\"\"\n return s1[0]*s2[1] - s1[1]*s2[0]\n\n\ndef _k_tetrahedron(xy):\n \"\"\"\n given a point-matrix of an element, solving for Kij analytically\n using barycentric coordinates (simplex coordinates)\n\n Parameters\n ----------\n xy : NDArray\n (x,y) of nodes 1, 2, 3, 4 given in counterclockwise manner,\n see notes.\n\n Returns\n -------\n ke_matrix : NDArray\n local stiffness matrix\n\n Notes\n -----\n A tetrahedron is described using [0, 1, 2, 3] (local node index) or\n [171, 27, 9, 53] (global index). Counterclockwise (CCW) is defined\n such that the barycentric coordinate of face (1->2->3) is positive.\n \"\"\"\n s = xy[[2, 3, 0, 1]] - xy[[1, 2, 3, 0]]\n\n # volume of the tetrahedron\n # TODO: remove abs, user must make sure all tetrahedrons are CCW.\n vt = np.abs(1./6 * la.det(s[[0, 1, 2]]))\n\n # calculate area (vector) of triangle faces\n # re-normalize using alternative (+,-) signs\n ij_pairs = [[0, 1], [1, 2], [2, 3], [3, 0]]\n signs = [1, -1, 1, -1]\n a = [sign*np.cross(s[i], s[j]) for (i, j), sign in zip(ij_pairs, signs)]\n a = np.array(a)\n\n # local (e for element) stiffness matrix\n ke_matrix = np.dot(a, a.transpose()) / (36. * vt)\n\n return ke_matrix\n"
] |
[
[
"numpy.dot",
"numpy.ix_",
"numpy.hstack",
"numpy.linalg.inv",
"scipy.sparse.csr_matrix",
"numpy.ones",
"numpy.linalg.det",
"numpy.isscalar",
"numpy.cross",
"numpy.repeat",
"numpy.array",
"numpy.zeros",
"numpy.vstack"
]
] |
SynStratos/dim_ae
|
[
"8840437574e7e3fee24fd78af6d1e74c6df4b68b"
] |
[
"dimae/autoencoders/autoencoder.py"
] |
[
"import numpy as np\nimport math\nfrom tensorflow.keras.layers import Input, Dense\nfrom tensorflow.keras import Model\n\n\nclass AE(Model):\n def __init__(self,\n n_input=None,\n code_nodes=None,\n summary=False,\n _code_activation='relu',\n _output_activation='sigmoid',\n _inner_activation='tanh',\n _code_kernel='he_uniform',\n _output_kernel='truncated_normal',\n _inner_kernel='glorot_uniform',\n _red_factor=7.5,\n *args,\n **kwargs):\n \"\"\"\n :param n_input: number of input features. If you are using a DataFrame, it will be equal to df.shape[1]\n :param code_nodes: number of required nodes for the encoder output. It will be set automatically depending on\n the reduction factor if not given by the user.\n :param summary: set to True if you want to print the AutoEncoder model structure.\n :param _code_activation: activation function at the code layer. Set to ReLu by default.\n :param _output_activation: activation function at the output layer. Set to Sigmoid by default.\n :param _inner_activation: activation function at Encoder and Decoder inner layers. Set to TanH by default.\n :param _code_kernel: kernel initializer at the code layer. Set to He Uniform by default.\n :param _output_kernel: kernel initializer at the output layer. Set to Truncated Normal by default.\n :param _inner_kernel: kernel initializer at Encoder and Decoder inner layers. Set to Glorot Uniform by default.\n :param _red_factor: reduction factor used to choose the number of code neurons if not explicitly given.\n :param args:\n :param kwargs:\n \"\"\"\n super(AE, self).__init__(*args, **kwargs)\n\n self.input_nodes = n_input\n\n # either a number of code nodes or reduction factor can be specified\n self.code_nodes = __even__(self.input_nodes / _red_factor) if not code_nodes else code_nodes\n\n # number of layers\n n_layers = __layer_number__(n_input, code_nodes)\n\n # CHOSEN WAY TO SELECT CORRECT NUMBER OF LAYER NODES, BASED ON LOGSPACE\n a = np.log(self.code_nodes)\n b = np.log(self.input_nodes)\n\n encoder_layers = np.flip(np.logspace(a, b, n_layers, base=np.e))[1:-1]\n decoder_layers = np.logspace(a, b, n_layers, base=np.e)[1:-1]\n\n LAYERS = []\n\n # GENERATE LAYERS\n self.input_layer = Input(shape=(self.input_nodes,))\n LAYERS.append(self.input_layer)\n\n for n in encoder_layers:\n LAYERS.append(\n Dense(__even__(n), activation=_inner_activation, kernel_initializer=_inner_kernel)(LAYERS[-1])\n )\n\n self.code_layer = Dense(self.code_nodes, activation=_code_activation, kernel_initializer=_code_kernel)(LAYERS[-1])\n LAYERS.append(self.code_layer)\n\n for n in decoder_layers:\n LAYERS.append(\n Dense(__even__(n), activation=_inner_activation, kernel_initializer=_inner_kernel)(LAYERS[-1])\n )\n\n output_layer = Dense(self.input_nodes, activation=_output_activation, kernel_initializer=_output_kernel)(LAYERS[-1])\n\n # SETUP MODEL\n super().__init__(self.input_layer, output_layer, *args, **kwargs)\n\n # PRINT OUT MODEL STRUCTURE IF REQUIRED\n if summary:\n self.summary()\n\n def generate_encoder(self):\n \"\"\"\n Creates the encoder model extracting it from the trained Autoencoder.\n :return: keras model of the encoder\n \"\"\"\n encoder = Model(self.input_layer, self.code_layer)\n return encoder\n\n\ndef __layer_number__(a, b):\n \"\"\"\n Calculates the proper number of hidden layers\n :param a: number of input nodes\n :param b: number of code nodes\n :return: number of layers from input to code\n \"\"\"\n if a < b:\n raise Exception(\"The number of code neurons must be lower then input nodes number.\")\n div = a / 
b\n\n if div > 1.5:\n div = math.log(div, 10)\n div = math.e**div\n div += 1\n\n return math.ceil(div)\n\n\ndef __even__(f):\n \"\"\"\n Help function to round each number to even.\n :param f: given number\n :return: even approximation of the given number\n \"\"\"\n return math.ceil(f / 2.) * 2\n"
] |
[
[
"numpy.log",
"numpy.logspace",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.Model",
"tensorflow.keras.layers.Input"
]
] |
aceilers/TheCannon
|
[
"c140a0c9555bb98956b013d1a9d29eb94ed4c514"
] |
[
"code/lamost/xcalib_5labels/paper_plots/teff_logg_training.py"
] |
[
"import pyfits\nimport matplotlib.pyplot as plt\nfrom matplotlib import rc\nimport matplotlib.gridspec as gridspec\nfrom matplotlib.colors import LogNorm\nplt.rc('text', usetex=True)\nplt.rc('font', family='serif')\nimport numpy as np\n\n\ndirec = \"/Users/annaho/TheCannon/data/lamost_paper\"\nsnr = np.load(\"%s/ref_snr.npz\" %direc)['arr_0']\napogee = np.load(\"%s/ref_label.npz\" %direc)['arr_0']\ncannon = np.load(\"../all_cannon_label_vals.npz\")['arr_0']\n\nhdulist = pyfits.open(\"%s/lamost_catalog_training.fits\" %direc)\ntbdata = hdulist[1].data\nhdulist.close()\nsnrg = tbdata.field(\"snrg\")\nsnri = tbdata.field(\"snri\")\nlamost_id_full = tbdata.field(\"lamost_id\")\nlamost_id = np.array([val.strip() for val in lamost_id_full])\nlamost_teff = tbdata.field(\"teff_1\")\nlamost_logg = tbdata.field(\"logg_1\")\nlamost_feh = tbdata.field(\"feh\")\nlamost = np.vstack((lamost_teff, lamost_logg, lamost_feh)).T\n\ndata = [lamost, cannon, apogee]\n\nlow = 3800\nhigh = 5500\n\nlow2 = 0.5\nhigh2 = 4.0\n\nfig,axarr = plt.subplots(1,3, figsize=(10,5.5), sharex=True, sharey=True)\n\nnames = ['LAMOST DR2', 'Cannon/LAMOST', 'APOGEE DR12']\n\nfor i in range(0, len(names)):\n ax = axarr[i]\n use = data[i]\n im = ax.hist2d(use[:,0], use[:,1], norm=LogNorm(), bins=100, \n cmap=\"inferno\", range=[[low,high],[low2,high2]], vmin=1,vmax=70)\n ax.set_xlabel(r\"$\\mbox{T}_{\\mbox{eff}}$\" + \" [K]\", fontsize=16)\n if i == 0:\n ax.set_ylabel(\"log g [dex]\", fontsize=16)\n ax.set_title(\"%s\" %names[i], fontsize=16)\n ax.set_xlim(low,high)\n ax.set_ylim(low2,high2)\n ax.tick_params(axis='x', labelsize=16)\n ax.locator_params(nbins=5)\n #if i == 2: fig.colorbar(im[3], cax=ax, label=\"log(Number of Objects)\")\n #plt.savefig(\"rc_%s.png\" %names)\n #plt.close()\n\nplt.gca().invert_xaxis()\nplt.gca().invert_yaxis()\nfig.subplots_adjust(right=0.8)\ncbar_ax = fig.add_axes([0.85, 0.1, 0.02, 0.8])\ncbar = plt.colorbar(im[3], cax=cbar_ax)\ncbar.set_label(\"log(density)\", size=16)\ncbar.ax.tick_params(labelsize=16)\ncbar.ax.tick_params(labelsize=16)\n\nplt.show()\n#plt.savefig(\"rc_5label.png\")\n"
] |
[
[
"matplotlib.pyplot.gca",
"matplotlib.colors.LogNorm",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.colorbar",
"numpy.load",
"matplotlib.pyplot.show",
"numpy.vstack"
]
] |
szqtc/MyMachineLearningNotes
|
[
"87fa278290d211fa9390dfdfb081acd90ceaeab9"
] |
[
"sharedcode/linear_models.py"
] |
[
"import numpy as np\nfrom . import kernels\n\nclass linear_regressor:\n \n def __init__(self):\n self.w_, self.b_ = None, 0.\n \n def fit(self, X, y):\n \"\"\"\n Parameters\n ------------\n X : numpy array, (m, n)\n features of training examples\n y : numpy array, (m, 1)\n labels of training examples\n \"\"\"\n m, n = X.shape\n X_ = np.c_[np.ones((m, 1)), X]\n w = np.linalg.inv(X_.T@X_) @ X_.T @ y # @ for dot product\n self.w_ = w[1:]\n self.b_ = w[0].squeeze()\n \n def predict(self, X):\n \"\"\"\n Parameters\n -------------\n X : numpy array, (m', n)\n feature vectors\n\n Returns\n ---------\n y : numpy array, (m', 1)\n predicted outputs\n \"\"\"\n return [email protected]_+self.b_\n\n\nclass ridge_regressor(linear_regressor):\n \"Linear model with regularization\"\n \n def __init__(self, C=1.):\n \"\"\"\n Parameters\n -------------\n C : non-negative double\n regularization factor\n \"\"\"\n self.C_ = C\n self.w_, self.b_ = None, 0.\n \n def fit(self, X, y):\n \"\"\"\n Parameters\n ------------\n X : numpy array, (m, n)\n features of training examples\n y : numpy array, (m, 1)\n labels of training examples\n \"\"\"\n m, n = X.shape\n X_ = np.c_[np.ones((m, 1)), X]\n I = np.diag([0.]+[1.]*n) # do not regularize the interception\n w_ = np.linalg.inv(X_.T @ X_ + I/self.C_) @ X_.T @ y\n self.w_ = w_[1:]\n self.b_ = w_[0].squeeze()\n\n\nclass kernel_ridge_regressor:\n \"Kernel ridge regression\"\n \n support_kernels = {'rbf': rbf, 'linear': linear, 'poly' : poly}\n \n def __init__(self, kernel='rbf', C=1., **kwds):\n \"\"\"\n Parameters\n -------------\n kernel : string, ='rbf', 'linear', or others\n the kernel adopted\n C : scalar, >0.\n constant to control the regularization\n **kwds :\n parameters passing to the kernel function\n \"\"\"\n self.C_ = C\n\n # set up kernels\n if kernel in self.support_kernels:\n self.kernel_ = kernel\n kernel_func = self.support_kernels[kernel]\n elif callable(kernel):\n try:\n self.kernel_ = kernel.__name__\n except AttributeError:\n self.kernel_ = str(kernel)\n kernel_func = kernel\n else:\n raise ValueError('The kernel {} is not support now'.format(kernel))\n\n self.kernel_parameters_ = kwds.copy()\n self.kernel_function_ = lambda x1, x2: kernel_func(x1, x2, **self.kernel_parameters_)\n \n def fit(self, X_data, y_data):\n \"\"\"\n Parameters\n -------------\n X_data : (m, n) array\n training data\n y_data : (m, 1) array, =+/-1\n the label of training data\n \n *NOTE*: we assume interception=0.\n \"\"\"\n m, n = X_data.shape\n kXX = self.kernel_function_(X_data.reshape(m, 1, n), X_data.reshape(1, m, n)) # m x m\n mat = kXX + np.eye(m)/self.C_\n self.mat_ = np.linalg.inv(mat)@y_data # m, 1\n self.X_fit_ = X_data\n \n def predict(self, X):\n \"\"\"\n Parameters\n -------------\n X : (m', n) array\n input features\n\n Returns\n ---------\n y_pred : (m', 1)\n predicted labels of input features\n \"\"\"\n m1, n = X.shape\n kXx = self.kernel_function_(X.reshape(m1, 1, n), self.X_fit_.reshape(1, -1, n)) # m1, m\n return [email protected]_ # m1, 1\n \n\nclass locally_weighted_linear_regressor:\n \"Locally weighted linear regression\"\n \n def __init__(self, tau=1.):\n \"\"\"\n Parameters\n -------------\n tau : float, >0., default: 1.\n the sigma in the Gaussian distribution\n \"\"\"\n self.tau_ = tau\n self.X_, self.y_ = None, None\n \n def fit(self, X, y):\n \"\"\"\n Parameters\n -------------\n X_train : numpy array, (m, n)\n features of the training sample\n y_train : numpy array, (m, 1)\n output of the training sample\n \"\"\"\n m = X.shape[0]\n self.X_ = 
np.c_[np.ones((m, 1)), X]\n self.y_ = y.copy()\n \n def predict(self, X):\n \"\"\"\n Parameters\n -------------\n X : numpy array, (m', n)\n the input feature to infer\n\n Returns\n ---------\n y_pred : numpy array, (m', 1)\n the predicted output of the given feature\n \"\"\"\n mm = X.shape[0]\n X_ = np.c_[np.ones((mm, 1)), X]\n \n y_pred = []\n for i in range(mm):\n sqdist = ((self.X_ - X_[i:i+1, :])**2).sum(axis=1)\n M = np.diag(np.exp(-sqdist/2./self.tau_**2))\n w = np.linalg.inv(self.X_.T@[email protected]_) @ self.X_.T @ M @ self.y_\n y_pred.append(X_[i, :]@w)\n return np.array(y_pred).reshape(-1, 1)\n\n\nclass logistic_regressor:\n \"Logistic regression with gradient descent\"\n \n def __init__(self, saveloss=True):\n \"\"\"\n Parameters\n -------------\n saveloss : bool, default: True\n whether to save the loss\n \"\"\"\n self.saveloss = saveloss\n self.losses = []\n \n self.w_, self.b_ = None, 0.\n self.learning_rate_, self.nepoch_ = None, None\n \n @staticmethod\n def loss(y, pprob):\n \"\"\"\n The loss function of logistic regression\n \n Parameters\n -------------\n y : numpy array, (m, 1)\n training target\n pprob : numpy array, (m, 1)\n the probabilty of positive type\n \n Returns\n ---------\n loss : double\n \"\"\"\n return -(y*np.log(pprob) + (1-y)*np.log(1.-pprob)).sum()\n \n def fit(self, X, y, learning_rate=0.1, nepoch=1000):\n \"\"\"\n Parameters\n -------------\n X_train : numpy array, (m, n)\n features of the training sample\n y_train : numpy array, (m, 1)\n output of the training sample\n learning_rate : positive double, default: 0.1\n nepoch : positive integer\n \"\"\"\n self.learning_rate_ = learning_rate\n self.nepoch_ = nepoch\n\n # gradient descent\n self.w_ = np.zeros((X.shape[1], 1))\n self.b_ = 0.\n self.losses = []\n for _ in range(nepoch):\n p = self.predict_proba(X)\n dy = (y-p)*self.learning_rate_\n self.b_ += dy.sum()\n self.w_ += X.T@dy\n\n if self.saveloss:\n self.losses.append(self.loss(y, p))\n \n def predict_proba(self, X):\n \"\"\"\n Parameters\n ------------\n X : (m, n) array\n m training samples with n features\n\n Returns\n ---------\n probability : (m, 1) array\n predicted probability for the positive case\n \"\"\"\n return 1./(1.+np.exp([email protected]_ - self.b_))\n \n def predict(self, X):\n \"\"\"\n Parameters\n ------------\n X : (m, n) array\n m training samples with n features\n\n Returns\n ---------\n y_pred : (m, 1) array\n predicted labels\n \"\"\"\n return self.predict_proba(X)>0.5\n \n\nclass softmax_regressor:\n \"Softmax regression\"\n \n def __init__(self, C=1., saveloss=True):\n \"\"\"\n Parameters\n -------------\n C : non-negative double, default: 1\n regularization factor\n saveloss : bool, default: True\n whether to save the loss\n \"\"\"\n self.C_ = C\n \n self.W_ = None\n self.learning_rate_ = None\n self.nepoch = None\n\n self.saveloss = saveloss\n self.losses =[]\n \n def predict_proba(self, X):\n \"\"\"\n Parameters\n -------------\n X : numpy array, (m, n)\n input features\n\n Returns\n ---------\n y_proba : numpy array, (m, t)\n predicted probablity in each class\n \"\"\"\n Z = [email protected]_[1:, :] + self.W_[:1, :]\n expZ = np.exp(Z)\n denorm = 1.+expZ.sum(axis=1, keepdims=True)\n return np.c_[expZ/denorm, 1./denorm]\n \n def predict(self, X):\n \"\"\"\n Parameters\n ------------\n X : numpy array, (m, n)\n input features\n\n Returns\n ---------\n y : numpy array, (m, 1)\n the best matched labels\n \"\"\"\n return self.predict_proba(X).argmax(axis=1).reshape(-1, 1)\n \n @staticmethod\n def loss(y, yprob):\n 
\"\"\"\n Parameters\n -------------\n y : integer numpy array, (m, 1)\n input labels\n yprob : numpy array, (m, t)\n the probality of each types\n\n Returns\n ---------\n loss : double\n \"\"\"\n t = yprob.shape[1]\n loss = 0.\n for i in range(t):\n probs = yprob[y[:, 0]==i, i]\n loss -= np.log(np.maximum(probs, 1e-10)).sum() # to avoid prob=0.\n return loss\n \n def fit(self, X, y, learning_rate=0.1, nepoch=1000):\n \"\"\"\n Parameters\n -------------\n X_train : numpy array, (m, n)\n features of the training sample\n y_train : numpy array, (m, 1)\n output of the training sample\n learning_rate : positive double, default: 0.1\n nepoch : positive integer\n \"\"\"\n self.learning_rate_ = learning_rate\n self.nepoch = nepoch\n\n m, n = X.shape\n X_ = np.c_[np.ones(m), X]\n \n t = np.unique(y).shape[0] # num of output types\n self.W_ = np.zeros((n+1, t-1)) # +1 for b\n\n # gradient descent\n self.losses = []\n for i in range(nepoch):\n p = self.predict_proba(X)\n for j in range(t-1):\n self.W_[:, j] += (X_.T@((y[:, 0]==j) - p[:, j])*self.C_ - self.W_[:, j])*self.learning_rate_\n \n if self.saveloss:\n self.losses.append(self.loss(y, p))"
] |
[
[
"numpy.diag",
"numpy.log",
"numpy.maximum",
"numpy.unique",
"numpy.linalg.inv",
"numpy.eye",
"numpy.ones",
"numpy.array",
"numpy.exp",
"numpy.zeros"
]
] |
progrunner17/blueoil
|
[
"5cbe8b2ceebaaa7a6582a377031ae92855bed0aa"
] |
[
"lmnet/lmnet/data_processor.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# Copyright 2018 The Blueoil Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\nfrom abc import ABCMeta\nfrom abc import abstractmethod\nimport pprint\n\nimport numpy as np\n\n\nclass Sequence:\n \"\"\"Sequence several processor together.\n\n Args:\n processors (List[Processor]): list of processor.\n\n Examples:\n | *Sequence([*\n | *FlipLeftRight(0.5),*\n | *Hue((-10, 10)),*\n | *])*\n \"\"\"\n\n def __init__(self, processors):\n self.processors = processors\n\n def __call__(self, **kwargs):\n for processor in self.processors:\n kwargs = processor(**kwargs)\n return kwargs\n\n def __repr__(self):\n return pprint.saferepr(self.processors)\n\n # TODO(wakisaka): Should create interface class to set image size for processor child class.\n def set_image_size(self, image_size):\n \"\"\"Override processors image size\n\n Args:\n image_size(tuple): (height, width)\n \"\"\"\n\n # Avoid circular import\n from lmnet.pre_processor import Resize, ResizeWithGtBoxes, ResizeWithMask, LetterBoxes\n from lmnet.post_processor import FormatYoloV2\n\n for process in self.processors:\n class_list = (Resize, ResizeWithGtBoxes, ResizeWithMask, LetterBoxes)\n if isinstance(process, class_list):\n process.size = image_size\n\n if isinstance(process, FormatYoloV2):\n process.image_size = image_size\n\n\nclass Processor(metaclass=ABCMeta):\n\n @abstractmethod\n def __call__(self, **kwargs):\n \"\"\"Call processor method for each a element of data.\n\n Return image and labels etc.\n \"\"\"\n return kwargs\n\n def __repr__(self):\n return \"{}({})\".format(self.__class__.__name__, self.__dict__)\n\n\n# TODO(wakisaka): move to somewhere.\ndef binarize(labels, num_classes):\n \"\"\"Return numpy array binarized labels.\"\"\"\n targets = np.array(labels).reshape(-1)\n one_hot = np.eye(num_classes)[targets]\n return one_hot\n"
] |
[
[
"numpy.eye",
"numpy.array"
]
] |
emsloate/nba-movement-data
|
[
"d2ed96e7ac4f1060e60561bf9c4fc77bf9c63f27"
] |
[
"scripts/game_df.py"
] |
[
"\n# coding: utf-8\n\n# In[5]:\n\nimport pandas as pd\nimport os\nimport glob\nimport numpy as np\n# In[6]:\n\n#get game data\ndef get_game(path):\n game = pd.read_json(path)\n #get ids from events, home team and visiting team info\n game['event_id'] = game['events'].apply(lambda x: int(x['eventId']))\n game['visitor_name'] = game['events'].apply(lambda x: x['visitor']['name'])\n game['visitor_id'] = game['events'].apply(lambda x: int(x['visitor']['teamid']))\n game['home_name'] = game['events'].apply(lambda x: x['home']['name'])\n game['home_id'] = game['events'].apply(lambda x: int(x['home']['teamid']))\n return game\n\n\n# In[7]:\n\ndef get_home_visitor_players(game):\n #get all players on visiting team\n visitor = pd.DataFrame(game['events'][0]['visitor'])\n visitor['firstname'] = visitor['players'].apply(lambda x: x['firstname'])\n visitor['lastname'] = visitor['players'].apply(lambda x: x['lastname'])\n visitor['playerid'] = visitor['players'].apply(lambda x: int(x['playerid']))\n visitor['jersey'] = visitor['players'].apply(lambda x: int(x['jersey']))\n visitor['position'] = visitor['players'].apply(lambda x: x['position']).astype('category')\n #get all players on home team\n home = pd.DataFrame(game['events'][0]['home'])\n home['firstname'] = home['players'].apply(lambda x: x['firstname'])\n home['lastname'] = home['players'].apply(lambda x: x['lastname'])\n home['playerid'] = home['players'].apply(lambda x: int(x['playerid']))\n home['jersey'] = home['players'].apply(lambda x: int(x['jersey']))\n home['position'] = home['players'].apply(lambda x: x['position']).astype('category')\n return home, visitor\n\n\n# In[8]:\n\n#return the moment dictionary corresponding to the time of the shot\ndef right_moment(event,game_clock):\n moments = event['moments']\n for moment in moments:\n if moment[2] == game_clock:\n return moment\n \n return None\n\n\n# In[22]:\n\ndef get_game_shots(game,shots_fixed,game_id):\n #get only shots that happened in this game\n game_shots = shots_fixed[shots_fixed['GAME_ID'] == game_id]\n #merge shots with events\n this_game_shots = pd.merge(game_shots,game,left_on=['GAME_EVENT_ID'],right_on=['event_id'])\n #get the moment where the shot occured, moment here is an array\n this_game_shots['moment'] = this_game_shots.apply(lambda x: right_moment(x.events,x.SHOT_TIME),axis = 1)\n return this_game_shots\n\n\n# In[27]:\n\n#some shots will not be able to find the moment, this appears to be because the shot occured before the event started\n#this happens because actual shot time is 2-3 seconds before the recorded shot time\ndef get_missing_shots(this_game_shots,game):\n no_moment = this_game_shots[this_game_shots['moment'].isna()]\n #solution, get previous event and check for time of shot in that event\n no_moment['previous_id'] = no_moment['GAME_EVENT_ID'] - 1\n \n #remerging to get previous event data\n #this will remove some rows- event numbers seem to skip\n #TODO: find way to get these rows, perhaps then merge with event id -2, +1 ?? 
\n no_moment = pd.merge(no_moment,game,left_on=['previous_id'],right_on=['event_id'],suffixes=['_x',''])\n #hopefully shot time will be in this event\n no_moment['moment'] = no_moment.apply(lambda x: right_moment(x.events,x.SHOT_TIME),axis = 1)\n #drop duplicate columns so we can add this back to tgs dataframe\n no_moment.drop(no_moment.filter(regex='_x$').columns.tolist(),axis=1, inplace=True)\n no_moment.drop('previous_id',axis = 1,inplace=True)\n \n #re add shots that did not have an inital moment back to the dataframe of shot moments\n #getting shots that originally did not have na moments\n tgs_mom = this_game_shots[~this_game_shots['moment'].isna()]\n #add shots from no_moments\n new_tgs = pd.concat([tgs_mom,no_moment])\n #may still have na moments, have to discard these for now\n new_tgs = new_tgs[~new_tgs['moment'].isna()]\n \n return new_tgs\n\n\n# In[11]:\n\n#get player id, x & y location for each player. home players are always first 5\ndef get_xy(new_tgs):\n for i in range(1,6):\n #home players are in spots 1-5 in 5th element of a moment array\n #spot 0 is the ball\n new_tgs['home_p{}'.format(i)] = new_tgs['moment'].apply(lambda x: x[5][i][1])\n new_tgs['home_p{}_x'.format(i)] = new_tgs['moment'].apply(lambda x: x[5][i][2])\n new_tgs['home_p{}_y'.format(i)] = new_tgs['moment'].apply(lambda x: x[5][i][3])\n \n #visitors are 6-10\n new_tgs['visitor_p{}'.format(i)] = new_tgs['moment'].apply(lambda x: x[5][i+5][1])\n new_tgs['visitor_p{}_x'.format(i)] = new_tgs['moment'].apply(lambda x: x[5][i+5][2])\n new_tgs['visitor_p{}_y'.format(i)] = new_tgs['moment'].apply(lambda x: x[5][i+5][3])\n \n return new_tgs\n\n\n# In[12]:\n\n# def convert_locations(df):\n# # first force all points above 47 to their half court counterparts\n# # keep all original points for furhter limitations to single court\n# for i in range(1,6):\n# df['home_p{}_x_loc_original'.format(i)] = df['home_p{}_x'.format(i)]\n# df['home_p{}_y_loc_original'.format(i)] = df['home_p{}_y'.format(i)]\n\n# #locations on other side of court are chnaged so they reflect the same location on 1st half of court\n# df.loc[df['home_p{}_x'.format(i)] > 47,'home_p{}_y'.format(i)] = df.loc[df.x_loc > 47, 'home_p{}_y'.format(i)].apply(lambda y: 50 - y)\n# df.loc[df['home_p{}_x'.format(i)] > 47,'home_p{}_x'.format(i)] = df.loc[df.x_loc > 47, 'home_p{}_x'.format(i)].apply(lambda x: 94 - x)\n\n# # convert to half court scale\n# # note the x_loc and the y_loc are switched in shot charts from movement data (charts are perpendicular)\n# for i in range(1,6):\n# data['x_loc_copy'] = data['x_loc']\n# data['y_loc_copy'] = data['y_loc']\n\n# # Range conversion formula\n# # http://math.stackexchange.com/questions/43698/range-scaling-problem\n\n# data['x_loc'] = data['y_loc_copy'].apply(lambda y: 250 * (1 - (y - 0)/(50 - 0)) + -250 * ((y - 0)/(50 - 0)))\n# data['y_loc'] = data['x_loc_copy'].apply(lambda x: -47.5 * (1 - (x - 0)/(47 - 0)) + 422.5 * ((x - 0)/(47 - 0)))\n# data = data.drop(['x_loc_copy', 'y_loc_copy'], axis=1, inplace=False)\n \n# return df\n\n\n# In[13]:\n\ndef get_home_vis_shots(new_tgs,home,visitor):#using shooter id & team id, get defender distances & angles\n home_id = home.teamid[0]\n visitor_id = visitor.teamid[0]\n #divide shots into those taken by home, away teams\n new_tgs['home_shot'] = new_tgs.apply(lambda x: 1 if x.TEAM_ID == home_id else 0,axis = 1)\n home_tgs = new_tgs[new_tgs['home_shot'] == 1]\n visitor_tgs = new_tgs[new_tgs['home_shot'] == 0]\n \n return home_tgs,visitor_tgs\n\n\n# In[31]:\n\ndef 
get_shooter_loc(shooter_id,ids,locs):\n for pid,loc in zip(ids,locs):\n if shooter_id == pid:\n return loc\n\n\n# In[32]:\n\n#get shooter x, y coordinates\ndef get_shooter_coords(df,h_v):\n '''\n df: DataFrame\n h_v: string, either 'home' or 'visitor', indicates which df we are working with and thus which players we want to search for to get shooter\n \n returns: dataframe with two new columns which are x & y coordinate of the shooter at the time of the shot\n '''\n #long line of code here - takes player ids of each player and their coordinate, selects the one that matches shooter id and returns that coordinate\n df['shooter_x'] = df.apply(lambda x: get_shooter_loc(x.PLAYER_ID,\n [x['{}_p1'.format(h_v)],x['{}_p2'.format(h_v)],x['{}_p3'.format(h_v)],x['{}_p4'.format(h_v)],x['{}_p5'.format(h_v)]],\n [x['{}_p1_x'.format(h_v)],x['{}_p2_x'.format(h_v)],x['{}_p3_x'.format(h_v)],x['{}_p4_x'.format(h_v)],x['{}_p5_x'.format(h_v)]]),\n axis = 1)\n \n df['shooter_y'] = df.apply(lambda x: get_shooter_loc(x.PLAYER_ID,\n [x['{}_p1'.format(h_v)],x['{}_p2'.format(h_v)],x['{}_p3'.format(h_v)],x['{}_p4'.format(h_v)],x['{}_p5'.format(h_v)]],[x['{}_p1_y'.format(h_v)],\n x['{}_p2_y'.format(h_v)],x['{}_p3_y'.format(h_v)],x['{}_p4_y'.format(h_v)],x['{}_p5_y'.format(h_v)]]),\n axis = 1) \n \n return df\n\n\n# In[39]:\n\n#get defender distances, angles\ndef get_defender_distances(df,defender,h_v):\n '''\n df: dataframe\n defender: integer, defender #\n h_v: which team *defenders* are on, either 'home' or 'visitor'\n \n returns: numpy array of distance between shooter and defender\n '''\n shooter_locs = df[['shooter_x','shooter_y']].values\n def_locs = df[['{}_p{}_x'.format(h_v,defender),'{}_p{}_y'.format(h_v,defender)]].values\n #norm of differences = distance\n diff = shooter_locs - def_locs\n return np.linalg.norm(diff,axis = 1)\n\ndef get_defender_angles(df,defender,h_v):\n '''\n df: dataframe\n defender: integer, defender #\n h_v: which team *defenders* are on, either 'home' or 'visitor'\n \n returns: numpy array of angle between shooter and defender (in degrees)\n '''\n shooter_locs = df[['shooter_x','shooter_y']].values\n def_locs = df[['{}_p{}_x'.format(h_v,defender),'{}_p{}_y'.format(h_v,defender)]].values\n #substract shooter location from each coordinate so shooter is at the origin\n diff = def_locs - shooter_locs\n #use inverse tangent to get angle between shooter, defender\n return np.degrees(np.arctan(diff[:,1] / diff[:,0]))\n\n\n# In[35]:\n\n#add defender locs, distances to dataframe\ndef defender_dist_angles(df,is_home):\n '''\n df: dataframe of shots & player locs & loc of shooter\n is_home: boolean, tells us if df is home or away team\n \n returns: df with distances, angles of each defender from shooter\n '''\n if is_home:\n for i in range(1,6):\n #since we want info about defenders, if we are the home team we pass visitor to the functions here\n df['defender_{}_dist'.format(i)] = get_defender_distances(df,i,'visitor')\n df['defender_{}_angle'.format(i)] = get_defender_angles(df,i,'visitor')\n else:\n for i in range(1,6):\n #since we want info about defenders, if we are the visiting team we pass home to the functions here\n df['defender_{}_dist'.format(i)] = get_defender_distances(df,i,'home')\n df['defender_{}_angle'.format(i)] = get_defender_angles(df,i,'home')\n return df\n\n\n# In[43]:\n\n#sort defenders by distance to shooter, select top 3 to add as features\ndef get_closest_defenders(df):\n df['defender_prox'] = df.apply(lambda x: 
np.argsort([x.defender_1_dist,x.defender_2_dist,x.defender_3_dist,x.defender_4_dist,x.defender_5_dist]),axis = 1)\n\n #defender prox is array of defender indices (starting at 0, so add 1 for each defender)\n #also want to get angles for the 3 closest defenders\n df['1st_closest_defender_dist'] = df.apply(lambda x: x['defender_{}_dist'.format(x['defender_prox'][0] + 1)], axis = 1)\n df['1st_closest_defender_angle'] = df.apply(lambda x: x['defender_{}_angle'.format(x['defender_prox'][0] + 1)], axis = 1)\n\n df['2nd_closest_defender_dist'] = df.apply(lambda x: x['defender_{}_dist'.format(x['defender_prox'][1] + 1)], axis = 1)\n df['2nd_closest_defender_angle'] = df.apply(lambda x: x['defender_{}_angle'.format(x['defender_prox'][1] + 1)], axis = 1)\n\n df['3rd_closest_defender_dist'] = df.apply(lambda x: x['defender_{}_dist'.format(x['defender_prox'][2] + 1)], axis = 1)\n df['3rd_closest_defender_angle'] = df.apply(lambda x: x['defender_{}_angle'.format(x['defender_prox'][2] + 1)], axis = 1)\n\n #these are the columns we want for now\n df_final = df[['GAME_ID', 'GAME_EVENT_ID', 'PLAYER_ID', 'PLAYER_NAME',\n 'TEAM_ID', 'TEAM_NAME', 'PERIOD', 'MINUTES_REMAINING',\n 'SECONDS_REMAINING', 'EVENT_TYPE', 'ACTION_TYPE', 'SHOT_TYPE',\n 'SHOT_ZONE_BASIC', 'SHOT_ZONE_AREA', 'SHOT_ZONE_RANGE', 'SHOT_DISTANCE',\n 'LOC_X', 'LOC_Y', 'SHOT_ATTEMPTED_FLAG', 'SHOT_MADE_FLAG', 'GAME_DATE',\n 'HTM', 'VTM', 'EVENTTIME', 'QUARTER', 'SHOT_TIME','visitor_name', 'visitor_id', 'home_name',\n 'shooter_x','shooter_y','home_id','1st_closest_defender_dist',\n '1st_closest_defender_angle', '2nd_closest_defender_dist',\n '2nd_closest_defender_angle', '3rd_closest_defender_dist',\n '3rd_closest_defender_angle']]\n \n return df_final\n\n\n# In[37]:\n\ndef gather_data(game_num):\n #get shot data\n shots_fixed = pd.read_csv(\"../data/shots/shots_fixed.csv\")\n \n #get \"raw\" game data\n game = get_game(\"../data/{}.json\".format(game_num))\n \n #get home, visitor players as dfs\n home,visitor = get_home_visitor_players(game)\n \n #get all shots from this game\n this_game_shots = get_game_shots(game,shots_fixed,int(game_num))\n \n #add moment data for each shot\n new_tgs = get_missing_shots(this_game_shots,game)\n \n #get x&y coordingates of each players\n new_tgs = get_xy(new_tgs)\n \n #seperate shots for home team and visiting team\n home_tgs,visitor_tgs = get_home_vis_shots(new_tgs,home,visitor)\n \n ##get the shooter coordinates for each shot\n home_tgs = get_shooter_coords(home_tgs,'home')\n visitor_tgs = get_shooter_coords(visitor_tgs,'visitor')\n \n #get distances, angles (to shooter) of each defender on the court\n home_tgs = defender_dist_angles(home_tgs,True)\n visitor_tgs = defender_dist_angles(visitor_tgs,False)\n \n #choose 3 closest defenders as features, remove extra column\n home_final = get_closest_defenders(home_tgs)\n visitor_final = get_closest_defenders(visitor_tgs)\n \n home_final.to_csv('../data/game_shots/{}_home.csv'.format(game_num))\n visitor_final.to_csv('../data/game_shots/{}_visitor.csv'.format(game_num))\n\n\n\nfiles = glob.glob(\"../data/*.json\")\nnum_files = len(files)\nnum_processed = 0\nfor f in files:\n fnum = f.split(\".json\")[0]\n fnum = fnum.split(\"data/\")[1]\n print(\"Processing file : {}\".format(fnum))\n try:\n gather_data(fnum)\n num_processed += 1\n print(\"Processed {} / {} files\".format(num_processed,num_files))\n except:\n print(\"Could not process \",fnum)\n\n"
] |
[
[
"pandas.merge",
"pandas.concat",
"pandas.read_csv",
"numpy.arctan",
"numpy.linalg.norm",
"pandas.DataFrame",
"pandas.read_json",
"numpy.argsort"
]
] |
nithinksath96/Small_object_detection
|
[
"b2ccd29d128487909f92e4fc796f72a0a4458e32"
] |
[
"argo_data_scripts/det/models/darknet.py"
] |
[
"from os.path import basename\r\nimport numpy as np\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\nfrom mmdet.models.registry import BACKBONES\r\n\r\nclass weightedFeatureFusion(nn.Module): # weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070\r\n def __init__(self, layers, weight=False):\r\n super(weightedFeatureFusion, self).__init__()\r\n self.layers = layers # layer indices\r\n self.weight = weight # apply weights boolean\r\n self.n = len(layers) + 1 # number of layers\r\n if weight:\r\n self.w = torch.nn.Parameter(torch.zeros(self.n)) # layer weights\r\n\r\n def forward(self, x, outputs):\r\n # Weights\r\n if self.weight:\r\n w = torch.sigmoid(self.w) * (2 / self.n) # sigmoid weights (0-1)\r\n x = x * w[0]\r\n\r\n # Fusion\r\n nc = x.shape[1] # input channels\r\n for i in range(self.n - 1):\r\n a = outputs[self.layers[i]] * w[i + 1] if self.weight else outputs[self.layers[i]] # feature to add\r\n ac = a.shape[1] # feature channels\r\n dc = nc - ac # delta channels\r\n\r\n # Adjust channels\r\n if dc > 0: # slice input\r\n x[:, :ac] = x[:, :ac] + a # or a = nn.ZeroPad2d((0, 0, 0, 0, 0, dc))(a); x = x + a\r\n elif dc < 0: # slice feature\r\n x = x + a[:, :nc]\r\n else: # same shape\r\n x = x + a\r\n return x\r\n\r\[email protected]_module\r\nclass Darknet(nn.Module):\r\n \"\"\"Darknet backbone.\r\n \"\"\"\r\n\r\n def __init__(\r\n self,\r\n cfg=None,\r\n out_indices=(1, 2, 3),\r\n ):\r\n super().__init__()\r\n if cfg is None:\r\n from os.path import dirname, realpath, join\r\n filedir = dirname(realpath(__file__))\r\n cfg = join(filedir, 'darknet53.cfg')\r\n self.module_defs, self.stage_last_layer = self.parse_model_cfg(cfg)\r\n # note stage_last_layer is the indices in module_defs, not neccesarily pytorch layers\r\n self.out_indices = out_indices\r\n self.out_layers = [self.stage_last_layer[i] for i in out_indices]\r\n\r\n self.module_list, self.routs = self.create_modules(self.module_defs)\r\n\r\n # possible extension if training takes too long\r\n # self._freeze_stages()\r\n\r\n def parse_model_cfg(self, path):\r\n # Parse the yolo *.cfg file and return module definitions path may be 'cfg/yolov3.cfg', 'yolov3.cfg', or 'yolov3'\r\n if not path.endswith('.cfg'): # add .cfg suffix if omitted\r\n path += '.cfg'\r\n\r\n with open(path, 'r') as f:\r\n lines = f.read().split('\\n')\r\n lines = [x for x in lines if x and not x.startswith('#')]\r\n lines = [x.rstrip().lstrip() for x in lines] # get rid of fringe whitespaces\r\n mdefs = [] # module definitions\r\n stage_last_layer = []\r\n for line in lines:\r\n if line.startswith('['): # This marks the start of a new block\r\n mdefs.append({})\r\n mdefs[-1]['type'] = line[1:-1].rstrip()\r\n if mdefs[-1]['type'] == 'convolutional':\r\n mdefs[-1]['batch_normalize'] = 0 # pre-populate with zeros (may be overwritten later)\r\n elif line.startswith('@stage'):\r\n stage_last_layer.append(len(mdefs) - 2) # there is a pop(0) in create_modules\r\n else:\r\n key, val = line.split(\"=\")\r\n key = key.rstrip()\r\n\r\n if key == 'anchors': # return nparray\r\n mdefs[-1][key] = np.array([float(x) for x in val.split(',')]).reshape((-1, 2)) # np anchors\r\n elif key in ['from', 'layers', 'mask']: # return array\r\n mdefs[-1][key] = [int(x) for x in val.split(',')]\r\n else:\r\n val = val.strip()\r\n if val.isnumeric(): # return int or float\r\n mdefs[-1][key] = int(val) if (int(val) - float(val)) == 0 else float(val)\r\n else:\r\n mdefs[-1][key] = val # return string\r\n\r\n # Check all fields are 
supported\r\n supported = ['type', 'batch_normalize', 'filters', 'size', 'stride', 'pad', 'activation', 'layers', 'groups',\r\n 'from', 'mask', 'anchors', 'classes', 'num', 'jitter', 'ignore_thresh', 'truth_thresh', 'random',\r\n 'stride_x', 'stride_y', 'weights_type', 'weights_normalization', 'scale_x_y', 'beta_nms', 'nms_kind',\r\n 'iou_loss', 'iou_normalizer', 'cls_normalizer', 'iou_thresh']\r\n\r\n f = [] # fields\r\n for x in mdefs[1:]:\r\n [f.append(k) for k in x if k not in f]\r\n u = [x for x in f if x not in supported] # unsupported fields\r\n assert not any(u), \"Unsupported fields %s in %s. See https://github.com/ultralytics/yolov3/issues/631\" % (u, path)\r\n\r\n return mdefs, stage_last_layer\r\n\r\n def create_modules(self, module_defs):\r\n # Constructs module list of layer blocks from module configuration in module_defs\r\n\r\n hyperparams = module_defs.pop(0)\r\n output_filters = [int(hyperparams['channels'])]\r\n module_list = nn.ModuleList()\r\n routs = [] # list of layers which rout to deeper layers\r\n\r\n for i, mdef in enumerate(module_defs):\r\n modules = nn.Sequential()\r\n\r\n if mdef['type'] == 'convolutional':\r\n bn = mdef['batch_normalize']\r\n filters = mdef['filters']\r\n size = mdef['size']\r\n stride = mdef['stride'] if 'stride' in mdef else (mdef['stride_y'], mdef['stride_x'])\r\n modules.add_module('Conv2d', nn.Conv2d(in_channels=output_filters[-1],\r\n out_channels=filters,\r\n kernel_size=size,\r\n stride=stride,\r\n padding=(size - 1) // 2 if mdef['pad'] else 0,\r\n groups=mdef['groups'] if 'groups' in mdef else 1,\r\n bias=not bn))\r\n if bn:\r\n modules.add_module('BatchNorm2d', nn.BatchNorm2d(filters, momentum=0.1))\r\n if mdef['activation'] == 'leaky': # activation study https://github.com/ultralytics/yolov3/issues/441\r\n modules.add_module('activation', nn.LeakyReLU(0.1, inplace=True))\r\n\r\n elif mdef['type'] == 'maxpool':\r\n size = mdef['size']\r\n stride = mdef['stride']\r\n maxpool = nn.MaxPool2d(kernel_size=size, stride=stride, padding=(size - 1) // 2)\r\n if size == 2 and stride == 1: # yolov3-tiny\r\n modules.add_module('ZeroPad2d', nn.ZeroPad2d((0, 1, 0, 1)))\r\n modules.add_module('MaxPool2d', maxpool)\r\n else:\r\n modules = maxpool\r\n\r\n elif mdef['type'] == 'upsample':\r\n modules = nn.Upsample(scale_factor=mdef['stride'])\r\n\r\n elif mdef['type'] == 'route': # nn.Sequential() placeholder for 'route' layer\r\n layers = mdef['layers']\r\n filters = sum([output_filters[i + 1 if i > 0 else i] for i in layers])\r\n routs.extend([l if l > 0 else l + i for l in layers])\r\n\r\n elif mdef['type'] == 'shortcut': # nn.Sequential() placeholder for 'shortcut' layer\r\n layers = mdef['from']\r\n filters = output_filters[-1]\r\n routs.extend([i + l if l < 0 else l for l in layers])\r\n modules = weightedFeatureFusion(layers=layers, weight='weights_type' in mdef)\r\n\r\n else:\r\n print('Warning: Unrecognized Layer Type: ' + mdef['type'])\r\n\r\n # Register module list and number of output filters\r\n module_list.append(modules)\r\n output_filters.append(filters)\r\n\r\n return module_list, routs\r\n\r\n def load_darknet_weights(self, path, cutoff=-1):\r\n # Parses and loads the weights stored in 'weights'\r\n\r\n # Establish cutoffs (load layers between 0 and cutoff. 
if cutoff = -1 all are loaded)\r\n \r\n filename = basename(path)\r\n if filename == 'darknet53.conv.74':\r\n cutoff = 75\r\n elif filename == 'yolov3-tiny.conv.15':\r\n cutoff = 15\r\n\r\n # Read weights file\r\n with open(path, 'rb') as f:\r\n # Read Header https://github.com/AlexeyAB/darknet/issues/2914#issuecomment-496675346\r\n version = np.fromfile(f, dtype=np.int32, count=3) # (int32) version info: major, minor, revision\r\n seen = np.fromfile(f, dtype=np.int64, count=1) # (int64) number of images seen during training\r\n\r\n weights = np.fromfile(f, dtype=np.float32) # the rest are weights\r\n\r\n ptr = 0\r\n for i, (mdef, module) in enumerate(zip(self.module_defs[:cutoff], self.module_list[:cutoff])):\r\n if mdef['type'] == 'convolutional':\r\n conv = module[0]\r\n if mdef['batch_normalize']:\r\n # Load BN bias, weights, running mean and running variance\r\n bn = module[1]\r\n nb = bn.bias.numel() # number of biases\r\n # Bias\r\n bn.bias.data.copy_(torch.from_numpy(weights[ptr:ptr + nb]).view_as(bn.bias))\r\n ptr += nb\r\n # Weight\r\n bn.weight.data.copy_(torch.from_numpy(weights[ptr:ptr + nb]).view_as(bn.weight))\r\n ptr += nb\r\n # Running Mean\r\n bn.running_mean.data.copy_(torch.from_numpy(weights[ptr:ptr + nb]).view_as(bn.running_mean))\r\n ptr += nb\r\n # Running Var\r\n bn.running_var.data.copy_(torch.from_numpy(weights[ptr:ptr + nb]).view_as(bn.running_var))\r\n ptr += nb\r\n else:\r\n # Load conv. bias\r\n nb = conv.bias.numel()\r\n conv_b = torch.from_numpy(weights[ptr:ptr + nb]).view_as(conv.bias)\r\n conv.bias.data.copy_(conv_b)\r\n ptr += nb\r\n # Load conv. weights\r\n nw = conv.weight.numel() # number of weights\r\n conv.weight.data.copy_(torch.from_numpy(weights[ptr:ptr + nw]).view_as(conv.weight))\r\n ptr += nw\r\n\r\n def init_weights(self, pretrained=None):\r\n if pretrained is not None:\r\n self.load_darknet_weights(pretrained)\r\n\r\n # def _freeze_stages(self):\r\n # if self.frozen_stages >= 0:\r\n # self.norm1.eval()\r\n # for m in [self.conv1, self.norm1]:\r\n # for param in m.parameters():\r\n # param.requires_grad = False\r\n\r\n # for i in range(1, self.frozen_stages + 1):\r\n # m = getattr(self, 'layer{}'.format(i))\r\n # m.eval()\r\n # for param in m.parameters():\r\n # param.requires_grad = False\r\n\r\n def forward(self, x):\r\n outs = []\r\n outs_for_shortcut = []\r\n verbose = False\r\n if verbose:\r\n str = ''\r\n print('0', x.shape)\r\n\r\n for i, (mdef, module) in enumerate(zip(self.module_defs, self.module_list)):\r\n mtype = mdef['type']\r\n if mtype in ['convolutional', 'upsample', 'maxpool']:\r\n x = module(x)\r\n elif mtype == 'shortcut': # sum\r\n if verbose:\r\n l = [i - 1] + module.layers # layers\r\n s = [list(x.shape)] + [list(outs_for_shortcut[i].shape) for i in module.layers] # shapes\r\n str = ' >> ' + ' + '.join(['layer %g %s' % x for x in zip(l, s)])\r\n x = module(x, outs_for_shortcut) # weightedFeatureFusion()\r\n elif mtype == 'route': # concat\r\n layers = mdef['layers']\r\n if verbose:\r\n l = [i - 1] + layers # layers\r\n s = [list(x.shape)] + [list(outs_for_shortcut[i].shape) for i in layers] # shapes\r\n str = ' >> ' + ' + '.join(['layer %g %s' % x for x in zip(l, s)])\r\n if len(layers) == 1:\r\n x = outs_for_shortcut[layers[0]]\r\n else:\r\n try:\r\n x = torch.cat([out[i] for i in layers], 1)\r\n except: # apply stride 2 for darknet reorg layer\r\n outs_for_shortcut[layers[1]] = F.interpolate(outs_for_shortcut[layers[1]], scale_factor=[0.5, 0.5])\r\n x = torch.cat([outs_for_shortcut[i] for i in layers], 1)\r\n\r\n 
outs_for_shortcut.append(x if i in self.routs else [])\r\n if i in self.out_layers:\r\n outs.append(x)\r\n\r\n if verbose:\r\n print('%g/%g %s -' % (i, len(self.module_list), mtype), list(x.shape), str)\r\n str = ''\r\n\r\n return tuple(outs)\r\n\r\n"
] |
[
[
"torch.nn.Sequential",
"torch.sigmoid",
"numpy.fromfile",
"torch.zeros",
"torch.cat",
"torch.nn.ModuleList",
"torch.nn.Conv2d",
"torch.from_numpy",
"torch.nn.MaxPool2d",
"torch.nn.Upsample",
"torch.nn.LeakyReLU",
"torch.nn.functional.interpolate",
"torch.nn.BatchNorm2d",
"torch.nn.ZeroPad2d"
]
] |
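For reference, a minimal stand-alone sketch of the Conv2d -> BatchNorm2d -> LeakyReLU block pattern that the Darknet config parser in the record above assembles per `convolutional` section. The channel counts, kernel size, and dummy input below are illustrative values, not taken from any cfg file.

```python
import torch
import torch.nn as nn

def conv_block(in_ch, out_ch, size=3, stride=1, batch_normalize=True):
    # Mirror the cfg-driven block: conv (bias only without BN), optional BN, leaky ReLU.
    modules = nn.Sequential()
    modules.add_module('Conv2d', nn.Conv2d(in_channels=in_ch,
                                           out_channels=out_ch,
                                           kernel_size=size,
                                           stride=stride,
                                           padding=(size - 1) // 2,
                                           bias=not batch_normalize))
    if batch_normalize:
        modules.add_module('BatchNorm2d', nn.BatchNorm2d(out_ch, momentum=0.1))
    modules.add_module('activation', nn.LeakyReLU(0.1, inplace=True))
    return modules

block = conv_block(3, 32)
x = torch.randn(1, 3, 64, 64)   # dummy NCHW input
print(block(x).shape)           # spatial size preserved: (1, 32, 64, 64)
```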
iorodeo/lia_video
|
[
"a362ea70c63e31da2e24401b66f4cfd058417d6c"
] |
[
"lia_progress/nodes/progress_bar_node.py"
] |
[
"#!/usr/bin/env python\nimport roslib\nroslib.load_manifest('lia_progress')\nimport rospy\nimport numpy\nimport cv\n\nfrom sensor_msgs.msg import Image\nfrom cv_bridge.cv_bridge import CvBridge \nfrom cv_bridge.cv_bridge import CvBridgeError\nfrom lia_messages.msg import ProgressMsg\n\nclass Progress_Bar(object):\n\n def __init__(self):\n self.image_shape = (15,640,3)\n self.empty_color = (230,230,230)\n self.fill_color = (0,0,200)\n self.base_array = 255*numpy.ones(self.image_shape,dtype=numpy.uint8)\n for i in range(0,3):\n self.base_array[:,:,i] = self.empty_color[i]\n self.bridge = CvBridge()\n rospy.init_node('progress_bar')\n\n # Pulications\n self.pub = rospy.Publisher('image_progress_bar', Image)\n\n # Subscriptions\n self.sub = rospy.Subscriber('progress',ProgressMsg,self.handle_progress_msg)\n\n\n def handle_progress_msg(self,data):\n frame_count = data.frame_count \n progress_t = data.progress_t \n record_t = data.record_t\n image_array = numpy.array(self.base_array)\n if record_t > 0:\n fill_ind = int(self.image_shape[1]*progress_t/record_t)\n else:\n fill_ind = self.image_shape[1]\n for i in range(0,3):\n image_array[:,:fill_ind,i] = self.fill_color[i]\n cv_image = cv.fromarray(image_array)\n rosimage = self.bridge.cv_to_imgmsg(cv_image,'rgb8')\n self.pub.publish(rosimage)\n\n def run(self):\n rospy.spin()\n\n# -----------------------------------------------------------------------------\nif __name__ == '__main__':\n\n node = Progress_Bar()\n node.run()\n\n\n"
] |
[
[
"numpy.array",
"numpy.ones"
]
] |
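A small sketch of the fill logic in the progress-bar node above with the ROS and cv_bridge plumbing left out; the bar shape and colors are the ones hard-coded in the record, while the progress and record times are invented for illustration.

```python
import numpy as np

shape = (15, 640, 3)
empty_color = (230, 230, 230)
fill_color = (0, 0, 200)

# Start from an "empty" bar, then color the leftmost fraction according to progress.
bar = np.empty(shape, dtype=np.uint8)
bar[:, :, :] = empty_color

progress_t, record_t = 30.0, 120.0          # assumed timing values
fill_ind = int(shape[1] * progress_t / record_t)
bar[:, :fill_ind, :] = fill_color
print(fill_ind, bar[0, 0], bar[0, -1])      # filled pixels on the left, empty on the right
```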
runxi-shen/Modeling-Evolution-at-bam
|
[
"22e00a9da4f434335cdbbb553aa519145c60279e"
] |
[
"vcf2fasta.py"
] |
[
"#!/home/rs2474/anaconda3/envs/imktData/bin/python\n\n### Convert VCF ouput from SLiM to fasta file for iMKT analysis\n\nimport pandas as pd\nimport numpy as np\nimport sys\n\n\ndef output_ref_sub_seq(slim_out, gen):\n with open(slim_out, 'r') as f:\n f_lines = f.readlines()\n ## get the original reference of bam CDS\n ref_line = list(filter(lambda line: line.startswith('Ancestral:'), f_lines))[0]\n ref_bam_cds = ref_line.split(':')[1].strip()\n ref_bam_cds_codons = [ref_bam_cds[i:i+3] for i in range(0, len(ref_bam_cds), 3)]\n \n ## check whether new reference seq exists\n new_ref_line = list(filter(lambda line: line.startswith('Generation {} New Ancestral: '.format(gen)), f_lines))[0]\n new_ref_cds = new_ref_line.split(':')[1].strip()\n new_ref_bam_cds_codons = [new_ref_cds[i:i+3] for i in range(0, len(new_ref_cds), 3)]\n ## check the number of complex codons in the simulation\n codon_diff = list(map(lambda ref, sub: sum(np.char.equal(np.array(list(ref)), np.array(list(sub)))), ref_bam_cds_codons,new_ref_bam_cds_codons))\n print(\"{} complex codons detected in counting divergence.\".format(len(list(filter(lambda x: x < 2, codon_diff)))))\n return ref_bam_cds, new_ref_cds\n \n \ndef output_haplo_from_vcf(ref_seq, vcf_file):\n new_ref_cds = list(ref_seq)\n\n vcf_df = pd.read_csv(vcf_file, sep='\\t', skiprows=14, header=0)\n seq_dict_hap1 = {k+'-1':new_ref_cds.copy() for k in vcf_df.iloc[:,9:].columns}\n seq_dict_hap2 = {k+'-2':new_ref_cds.copy() for k in vcf_df.iloc[:,9:].columns}\n \n ref_codon_pos = [list(range(i, i+3)) for i in range(0, len(ref_seq), 3)]\n snp_pos = (vcf_df['POS']-1).tolist() # VCF 1-index while SLiM uses 0-index\n \n ## check the number of complex codons in the simulation\n snp_per_codon = list(map(lambda codon_pos: len(set(snp_pos).intersection(codon_pos)), ref_codon_pos))\n print(\"{} potential complex codons detected in counting polymorphisms.\".format(len(list(filter(lambda x: x >= 2, snp_per_codon)))))\n \n for row_idx, row in vcf_df.iterrows():\n idx = row['POS']-1\n ## check if the ref match up\n ## if the assertion fails, check whether you replace the substitutions in the ancestral sequence\n assert row['REF'] == new_ref_cds[idx]\n ## get the alt alleles\n alt_alleles = [row['ALT']] if (len(row['ALT'])==1) else row['ALT'].split(',')\n alt_allele_dict = dict(zip(range(1,len(alt_alleles)+1), alt_alleles))\n alt_allele_dict[0] = row['REF']\n\n for hap in vcf_df.iloc[:,9:]:\n seq_dict_hap1[hap+'-1'][idx] = alt_allele_dict[int(row[hap].split('|')[0])]\n seq_dict_hap2[hap+'-2'][idx] = alt_allele_dict[int(row[hap].split('|')[1])]\n\n seq_dict_hap1 = list(map(lambda x: (x[0],''.join(x[1])), seq_dict_hap1.items()))\n seq_dict_hap2 = list(map(lambda x: (x[0],''.join(x[1])), seq_dict_hap2.items()))\n \n return seq_dict_hap1, seq_dict_hap2\n\n\ndef output2fasta(fasta_file, new_ref_cds, ref_bam_cds, seq_dict_hap1, seq_dict_hap2):\n with open(fasta_file, 'w') as f:\n f.write('>REF\\n'+new_ref_cds+'\\n')\n for hap1, hap2 in zip(seq_dict_hap1, seq_dict_hap2):\n f.write('>'+hap1[0]+'\\n'+hap1[1]+'\\n')\n f.write('>'+hap2[0]+'\\n'+hap2[1]+'\\n')\n f.write('>OUTGROUP\\n'+ref_bam_cds+'\\n')\n \n\ndef main():\n vcf_file = sys.argv[1]\n slim_out = sys.argv[2]\n gen = int(list(filter(lambda x: 'Gen' in x, '.'.join(vcf_file.split('.')[:-1]).split('_')))[0][3:])\n fasta_file = '.'.join(vcf_file.split('.')[:-1]) + '.fa'\n \n print()\n print('From VCF file:', vcf_file)\n print('From SLiM output:', slim_out)\n \n ref_bam_cds, new_ref_cds = output_ref_sub_seq(slim_out, int(gen))\n 
seq_dict_hap1, seq_dict_hap2 = output_haplo_from_vcf(new_ref_cds, vcf_file)\n output2fasta(fasta_file, new_ref_cds, ref_bam_cds, seq_dict_hap1, seq_dict_hap2)\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"pandas.read_csv"
]
] |
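A hedged sketch of the VCF-reading step used above. The in-memory VCF, the sample name `i0`, and the `skiprows` value are invented stand-ins; the script itself assumes 14 meta-information lines before the `#CHROM` header in the SLiM output.

```python
import io
import pandas as pd

# Two fake meta lines instead of the 14 the script skips; adjust skiprows to match.
vcf_text = ("##fileformat=VCFv4.2\n"
            "##source=example\n"
            "#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\ti0\n"
            "1\t5\t.\tA\tG\t.\tPASS\t.\tGT\t0|1\n")

vcf_df = pd.read_csv(io.StringIO(vcf_text), sep='\t', skiprows=2, header=0)
snp_pos = (vcf_df['POS'] - 1).tolist()   # VCF is 1-indexed, SLiM uses 0-indexing
print(snp_pos)                           # [4]
```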
Kar1suMAX/python-machine-learning-book-2nd-edition
|
[
"3150aee458592c20acf974f370e0a7399d373e7e"
] |
[
"Chap10.py"
] |
[
"import pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.linear_model import RANSACRegressor\nfrom sklearn.model_selection import train_test_split\nimport scipy as sp\nfrom sklearn.metrics import r2_score\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.linear_model import Lasso\nfrom sklearn.linear_model import Ridge\nfrom sklearn.linear_model import ElasticNet\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.ensemble import RandomForestRegressor\n\n#--------------------------------------------------------------------------------------------\n#データのインプット\ndf = pd.read_csv('https://raw.githubusercontent.com/rasbt/'\n 'python-machine-learning-book-2nd-edition'\n '/master/code/ch10/housing.data.txt',\n header=None,\n sep='\\s+')\n\ndf.columns = ['CRIM', 'ZN', 'INDUS', 'CHAS', \n 'NOX', 'RM', 'AGE', 'DIS', 'RAD', \n 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV']\n\nprint(df.head())\n#--------------------------------------------------------------------------------------------\n#散布図行列のプロット\ncols = ['LSTAT', 'INDUS', 'NOX', 'RM', 'MEDV']\n\nsns.pairplot(df[cols], size=2.5)\nplt.tight_layout()\n# plt.show()\nplt.close()\n#--------------------------------------------------------------------------------------------\n#共分散行列のプロット\ncm = np.corrcoef(df[cols].values.T)\n#sns.set(font_scale=1.5)\nhm = sns.heatmap(cm,\n cbar=True,\n annot=True,\n square=True,\n fmt='.2f',\n annot_kws={'size': 15},\n yticklabels=cols,\n xticklabels=cols)\n\nplt.tight_layout()\n# plt.savefig('images/10_04.png', dpi=300)\n# plt.show()\nplt.close()\n#--------------------------------------------------------------------------------------------\n#OLSの実装\nclass LinearRegressionGD(object):\n\n def __init__(self, eta=0.001, n_iter=20):\n self.eta = eta\n self.n_iter = n_iter\n\n def fit(self, X, y):\n self.w_ = np.zeros(1 + X.shape[1])\n self.cost_ = []\n\n for i in range(self.n_iter):\n output = self.net_input(X)\n errors = (y - output)\n self.w_[1:] += self.eta * X.T.dot(errors)\n self.w_[0] += self.eta * errors.sum()\n cost = (errors**2).sum() / 2.0\n self.cost_.append(cost)\n return self\n\n def net_input(self, X):\n return np.dot(X, self.w_[1:]) + self.w_[0]\n\n def predict(self, X):\n return self.net_input(X)\n\nX = df[['RM']].values\ny = df['MEDV'].values\n\nsc_x = StandardScaler()\nsc_y = StandardScaler()\nX_std = sc_x.fit_transform(X)\ny_std = sc_y.fit_transform(y[:, np.newaxis]).flatten()\n\nlr = LinearRegressionGD()\nlr.fit(X_std, y_std)\n\nplt.plot(range(1, lr.n_iter+1), lr.cost_)\nplt.ylabel('SSE')\nplt.xlabel('Epoch')\n# plt.show()\nplt.close()\n#--------------------------------------------------------------------------------------------\n#トレーニングサンプルと一緒にプロット\ndef lin_regplot(X, y, model):\n plt.scatter(X, y, c='steelblue', edgecolor='white', s=70)\n plt.plot(X, model.predict(X), color='black', lw=2) \n return \n\nlin_regplot(X_std, y_std, lr)\nplt.xlabel('Average number of rooms [RM] (standardized)')\nplt.ylabel('Price in $1000s [MEDV] (standardized)')\n# plt.show()\nplt.close()\n#--------------------------------------------------------------------------------------------\n#Price in $1000s 軸を元の尺度に戻す\nnum_rooms_std = sc_x.transform(np.array([[5.0]]))\nprice_std = lr.predict(num_rooms_std)\nprint(\"Price in $1000s: %.3f\" % 
sc_y.inverse_transform(price_std))\n#--------------------------------------------------------------------------------------------\n#切片の重みの出力\nprint('Slope: %.3f' % lr.w_[1])\nprint('Intercept: %.3f' % lr.w_[0])\n#--------------------------------------------------------------------------------------------\n# scikit-learnの回帰モデル\nslr = LinearRegression()\nslr.fit(X, y)\ny_pred = slr.predict(X)\nprint('Slope: %.3f' % slr.coef_[0])\nprint('Intercept: %.3f' % slr.intercept_)\n\nlin_regplot(X, y, slr)\nplt.xlabel('Average number of rooms [RM]')\nplt.ylabel('Price in $1000s [MEDV]')\n\n# plt.show()\nplt.close()\n#--------------------------------------------------------------------------------------------\n# RANSAC\nransac = RANSACRegressor(LinearRegression(), \n max_trials=100, \n min_samples=50, \n loss='absolute_loss', \n residual_threshold=5.0, \n random_state=0)\n\nransac.fit(X, y)\n#--------------------------------------------------------------------------------------------\n# RANSACのプロット\ninlier_mask = ransac.inlier_mask_\noutlier_mask = np.logical_not(inlier_mask)\n\nline_X = np.arange(3, 10, 1)\nline_y_ransac = ransac.predict(line_X[:, np.newaxis])\nplt.scatter(X[inlier_mask], y[inlier_mask],\n c='steelblue', edgecolor='white', \n marker='o', label='Inliers')\nplt.scatter(X[outlier_mask], y[outlier_mask],\n c='limegreen', edgecolor='white', \n marker='s', label='Outliers')\nplt.plot(line_X, line_y_ransac, color='black', lw=2) \nplt.xlabel('Average number of rooms [RM]')\nplt.ylabel('Price in $1000s [MEDV]')\nplt.legend(loc='upper left')\n\n# plt.show()\nplt.close()\n\nprint('Slope: %.3f' % ransac.estimator_.coef_[0])\nprint('Intercept: %.3f' % ransac.estimator_.intercept_)\n#--------------------------------------------------------------------------------------------\n# 線形回帰モデルの性能評価\nX = df.iloc[:, :-1].values\ny = df['MEDV'].values\n\nX_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.3, random_state=0)\n\nslr = LinearRegression()\n#--------------------------------------------------------------------------------------------\n# 残差ぷろっと\nslr.fit(X_train, y_train)\ny_train_pred = slr.predict(X_train)\ny_test_pred = slr.predict(X_test)\n\nplt.scatter(y_train_pred, y_train_pred - y_train,\n c='steelblue', marker='o', edgecolor='white',\n label='Training data')\nplt.scatter(y_test_pred, y_test_pred - y_test,\n c='limegreen', marker='s', edgecolor='white',\n label='Test data')\nplt.xlabel('Predicted values')\nplt.ylabel('Residuals')\nplt.legend(loc='upper left')\nplt.hlines(y=0, xmin=-10, xmax=50, color='black', lw=2)\nplt.xlim([-10, 50])\nplt.tight_layout()\n\n# plt.show()\nplt.close()\n#--------------------------------------------------------------------------------------------\n# MSE\nprint('MSE train: %.3f, test: %.3f' % (\n mean_squared_error(y_train, y_train_pred),\n mean_squared_error(y_test, y_test_pred)))\n#--------------------------------------------------------------------------------------------\n# R^2\nprint('R^2 train: %.3f, test: %.3f' % (\n r2_score(y_train, y_train_pred),\n r2_score(y_test, y_test_pred)))\n#--------------------------------------------------------------------------------------------\n# 線形回帰\nridge = Ridge(alpha=1.0)\nlasso = Lasso(alpha=1.0)\nelanet = ElasticNet(alpha=1.0, l1_ratio=0.5)\n#--------------------------------------------------------------------------------------------\n# 多項式\nX = np.array([258.0, 270.0, 294.0, \n 320.0, 342.0, 368.0, \n 396.0, 446.0, 480.0, 586.0])\\\n [:, np.newaxis]\n\ny = np.array([236.4, 234.4, 252.8, \n 298.6, 
314.2, 342.2, \n 360.8, 368.0, 391.2,\n 390.8])\n\nlr = LinearRegression()\npr = LinearRegression()\n# 二次の多項式特徴量のクラスをインスタンス化\nquadratic = PolynomialFeatures(degree=2)\nX_quad = quadratic.fit_transform(X)\n\nlr.fit(X, y)\n# np.newaxisで列ベクトルにする\nX_fit = np.arange(250, 600, 10)[:, np.newaxis]\ny_lin_fit = lr.predict(X_fit)\n\npr.fit(X_quad, y)\ny_quad_fit = pr.predict(quadratic.fit_transform(X_fit))\n\n# plot results\nplt.scatter(X, y, label='training points')\nplt.plot(X_fit, y_lin_fit, label='linear fit', linestyle='--')\nplt.plot(X_fit, y_quad_fit, label='quadratic fit')\nplt.legend(loc='upper left')\n\nplt.tight_layout()\n# plt.show()\nplt.close()\n\n\ny_lin_pred = lr.predict(X)\ny_quad_pred = pr.predict(X_quad)\n\nprint('Training MSE linear: %.3f, quadratic: %.3f' % (\n mean_squared_error(y, y_lin_pred),\n mean_squared_error(y, y_quad_pred)))\nprint('Training R^2 linear: %.3f, quadratic: %.3f' % (\n r2_score(y, y_lin_pred),\n r2_score(y, y_quad_pred)))\n#--------------------------------------------------------------------------------------------\n# 線形・二次元・三次元の比較\nX = df[['LSTAT']].values\ny = df['MEDV'].values\n\nregr = LinearRegression()\n\n# create quadratic features\nquadratic = PolynomialFeatures(degree=2)\ncubic = PolynomialFeatures(degree=3)\nX_quad = quadratic.fit_transform(X)\nX_cubic = cubic.fit_transform(X)\n\n# fit features\nX_fit = np.arange(X.min(), X.max(), 1)[:, np.newaxis]\n\n# 線形回帰\nregr = regr.fit(X, y)\ny_lin_fit = regr.predict(X_fit)\nlinear_r2 = r2_score(y, regr.predict(X))\n\n# 二次\nregr = regr.fit(X_quad, y)\ny_quad_fit = regr.predict(quadratic.fit_transform(X_fit))\nquadratic_r2 = r2_score(y, regr.predict(X_quad))\n\n# 三次\nregr = regr.fit(X_cubic, y)\ny_cubic_fit = regr.predict(cubic.fit_transform(X_fit))\ncubic_r2 = r2_score(y, regr.predict(X_cubic))\n\n\n# plot results\nplt.scatter(X, y, label='training points', color='lightgray')\n\nplt.plot(X_fit, y_lin_fit, \n label='linear (d=1), $R^2=%.2f$' % linear_r2, \n color='blue', \n lw=2, \n linestyle=':')\n\nplt.plot(X_fit, y_quad_fit, \n label='quadratic (d=2), $R^2=%.2f$' % quadratic_r2,\n color='red', \n lw=2,\n linestyle='-')\n\nplt.plot(X_fit, y_cubic_fit, \n label='cubic (d=3), $R^2=%.2f$' % cubic_r2,\n color='green', \n lw=2, \n linestyle='--')\n\nplt.xlabel('% lower status of the population [LSTAT]')\nplt.ylabel('Price in $1000s [MEDV]')\nplt.legend(loc='upper right')\n\n# plt.show()\nplt.close()\n#--------------------------------------------------------------------------------------------\n# 仮説検証\nX_log = np.log(X)\ny_sqrt = np.sqrt(y)\n\n# fit features\nX_fit = np.arange(X_log.min()-1, X_log.max()+1, 1)[:, np.newaxis]\n\nregr = regr.fit(X_log, y_sqrt)\ny_lin_fit = regr.predict(X_fit)\nlinear_r2 = r2_score(y_sqrt, regr.predict(X_log))\n\n# plot results\nplt.scatter(X_log, y_sqrt, label='training points', color='lightgray')\n\nplt.plot(X_fit, y_lin_fit, \n label='linear (d=1), $R^2=%.2f$' % linear_r2, \n color='blue', \n lw=2)\n\nplt.xlabel('log(% lower status of the population [LSTAT])')\nplt.ylabel('$\\sqrt{Price \\; in \\; \\$1000s \\; [MEDV]}$')\nplt.legend(loc='lower left')\n\nplt.tight_layout()\n# plt.show()\nplt.close()\n#--------------------------------------------------------------------------------------------\n# 決定木回帰\nX = df[['LSTAT']].values\ny = df['MEDV'].values\n\ntree = DecisionTreeRegressor(max_depth=3)\ntree.fit(X, y)\n\n# argsortはソート後のインデックスを返し、flattenは1次元の配列を返す\nsort_idx = X.flatten().argsort()\n\nlin_regplot(X[sort_idx], y[sort_idx], tree)\nplt.xlabel('% lower status of the population 
[LSTAT]')\nplt.ylabel('Price in $1000s [MEDV]')\n# plt.show()\nplt.close()\n#--------------------------------------------------------------------------------------------\n# ランダムフォレスト回帰\nX = df.iloc[:, :-1].values\ny = df['MEDV'].values\n\nX_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.4, random_state=1)\n\nforest = RandomForestRegressor(n_estimators=1000, \n criterion='mse', \n random_state=1, \n n_jobs=-1)\nforest.fit(X_train, y_train)\ny_train_pred = forest.predict(X_train)\ny_test_pred = forest.predict(X_test)\n\nprint('MSE train: %.3f, test: %.3f' % (\n mean_squared_error(y_train, y_train_pred),\n mean_squared_error(y_test, y_test_pred)))\nprint('R^2 train: %.3f, test: %.3f' % (\n r2_score(y_train, y_train_pred),\n r2_score(y_test, y_test_pred)))\n\nplt.scatter(y_train_pred, \n y_train_pred - y_train, \n c='steelblue',\n edgecolor='white',\n marker='o', \n s=35,\n alpha=0.9,\n label='training data')\nplt.scatter(y_test_pred, \n y_test_pred - y_test, \n c='limegreen',\n edgecolor='white',\n marker='s', \n s=35,\n alpha=0.9,\n label='test data')\n\nplt.xlabel('Predicted values')\nplt.ylabel('Residuals')\nplt.legend(loc='upper left')\nplt.hlines(y=0, xmin=-10, xmax=50, lw=2, color='black')\nplt.xlim([-10, 50])\nplt.tight_layout()\n\nplt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
] |
[
[
"sklearn.ensemble.RandomForestRegressor",
"matplotlib.pyplot.legend",
"numpy.dot",
"sklearn.metrics.r2_score",
"numpy.sqrt",
"sklearn.linear_model.ElasticNet",
"sklearn.preprocessing.PolynomialFeatures",
"sklearn.metrics.mean_squared_error",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.tight_layout",
"pandas.read_csv",
"numpy.arange",
"matplotlib.pyplot.hlines",
"sklearn.linear_model.Lasso",
"matplotlib.pyplot.close",
"numpy.zeros",
"numpy.logical_not",
"numpy.log",
"sklearn.model_selection.train_test_split",
"sklearn.linear_model.Ridge",
"numpy.corrcoef",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"sklearn.tree.DecisionTreeRegressor",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.xlim",
"sklearn.linear_model.LinearRegression",
"matplotlib.pyplot.xlabel",
"sklearn.preprocessing.StandardScaler"
]
] |
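A compact sketch of the linear-versus-quadratic fit comparison performed in the chapter script above, run on synthetic data instead of the housing dataset so it is self-contained; the data-generating curve and noise level are arbitrary.

```python
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.metrics import r2_score

rng = np.random.RandomState(0)
X = np.sort(rng.uniform(0, 10, 50))[:, np.newaxis]
y = 0.5 * X.ravel() ** 2 - X.ravel() + rng.normal(0, 2, 50)

# Expand features to degree 2 and fit both models.
quadratic = PolynomialFeatures(degree=2)
X_quad = quadratic.fit_transform(X)

lr = LinearRegression().fit(X, y)        # linear fit
pr = LinearRegression().fit(X_quad, y)   # quadratic fit

print('linear    R^2: %.3f' % r2_score(y, lr.predict(X)))
print('quadratic R^2: %.3f' % r2_score(y, pr.predict(X_quad)))
```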
b1quint/samfp
|
[
"1cd9b85851c02dc61a2294d67a309f62083d358d"
] |
[
"samfp/phmxtractor.py"
] |
[
"#!/usr/bin/env python\n# -*- coding: utf8 -*-\n\"\"\"\n Phase-map eXtractor\n by Bruno C Quint\n\n v1a - Phase extraction for Fabry-Perot.\n 2014.04.16 15:45 - Created an exception for errors while trying to access\n 'CRPIX%' cards on cube's header.\n\n Todo\n ----\n - Add debug option to argparse\n - Add log-to-a-file option to argparse\n - Add multithread/multiprocess\n - Use astropy.ccdproc\n - Verify code\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nimport argparse\nimport glob\nimport multiprocessing\nimport numpy as np\nimport os\nimport time\nimport scipy\nimport sys\n\nfrom astropy.modeling import models, fitting\nfrom matplotlib import gridspec\nfrom matplotlib import pyplot as plt\nfrom scipy import interpolate, signal\n\nfrom .tools import plots, version\nfrom samfp import io\n\n_log = io.logger.get_logger(__name__)\n\n__all__ = ['main', 'PhaseMapExtractor', 'PeakFinder']\n\n\ndef main():\n \"\"\"Main method that runs the Phase-map Extraction.\"\"\"\n\n # Parse arguments\n args = _parse_arguments()\n\n if args.quiet:\n _log.setLevel('ERROR')\n elif args.debug:\n _log.setLevel('DEBUG')\n else:\n _log.setLevel('INFO')\n\n # Start program\n start = time.time()\n _log.info(\"\")\n _log.info(\"SAM-FP Tools: PHase-Map eXtractor\")\n _log.info(\"by Bruno Quint ([email protected])\")\n _log.info(\"version {:s}\".format(version.__str__))\n _log.info(\"Starting program.\")\n _log.info(\"\")\n\n _log.info(\"Extracting phase-map from file: %s\" % args.filename)\n _log.info('')\n\n # Checking input data\n _check_dimensions(args.filename, _log)\n\n # # Extracting phase-map\n phase_map_extractor = PhaseMapExtractor(\n args.filename,\n args.wavelength,\n correlation=args.correlation,\n show=args.show,\n verbose=not args.quiet,\n ref=args.ref,\n output=args.output\n )\n \n phase_map_extractor.run()\n\n # All done!\n end = time.time() - start\n _log.info(\"Total time elapsed: %02d:%02d:%02d\" %\n (end // 3600, end % 3600 // 60, end % 60))\n _log.info(\"All done!\\n\")\n\n return\n # End of the main function\n\n\ndef _parse_arguments():\n \"\"\"\n Method that parses the arguments given by the user via command line.\n\n Returns\n -------\n args (namespace) : a namespace with the arguments to be used by this\n script.\n \"\"\"\n _about = \"Extracts the phase-map from a fits file containing a data-cube.\"\n\n parser = argparse.ArgumentParser(description=_about)\n\n parser.add_argument('filename', type=str, help=\"Input data-cube name.\")\n parser.add_argument('wavelength', type=float,\n help=\"Wavelength in A of the strongest line \"\n \"in the calibration cube.\")\n\n parser.add_argument('-c', '--correlation', action='store_true',\n help=\"Use correlation cube? true/[FALSE]\")\n parser.add_argument('-d', '--debug', action='store_true',\n help=\"Enable debug mode.\")\n parser.add_argument('-o', '--output', type=str, default=None,\n help=\"Name of the output phase-map file.\")\n parser.add_argument('-q', '--quiet', action='store_true',\n help=\"Run program quietly. true/[FALSE]\")\n parser.add_argument('-r', '--ref', type=int, nargs=2, default=None,\n help=\"Reference pixel for the correlation cube.\")\n parser.add_argument('-s', '--show', action='store_true',\n help=\"Show plots used in the process. 
true/[FALSE]\")\n\n args = parser.parse_args()\n return args\n\n\ndef _check_dimensions(filename, log, dimensions=3, keyword='NAXIS'):\n \"\"\"\n Method written to check the dimensions of the input fits data.\n\n Parameters\n ----------\n filename (str) : String containing path to the input filename.\n\n log (logging.Logger) : a logget for information\n\n dimensions (int) : Base number of dimensions for reference.\n\n keyword (str) : Header keyword that holds the number of axis\n (dimensions).\n \"\"\"\n header = io.pyfits.getheader(filename)\n\n if keyword not in header:\n data = io.pyfits.getdata(filename)\n ndim = data.ndim\n else:\n ndim = header[keyword]\n\n if ndim is not dimensions:\n log.error(\"INVALID DATA: wrong number of dimensions.\"\n \"Leaving now!\\n\")\n sys.exit()\n else:\n return\n\n\nclass PhaseMapExtractor:\n \"\"\"\n Class that holds the methods and sequences to perform phase-map extraction\n on a data-cube obtained with Fabry-Perot.\n\n Parameters\n ----------\n filename : str\n String that contains the path to the input data-cube.\n\n correlation : bool\n Use correlation cube? This is usefull in case that you have several\n lines in the data-cube or even if the data-cube has a low\n signal-to-noise ratio.\n\n show : bool\n Do you want the process to show plots? This is useful for\n debugging. If you know that your data is well behaved, you can\n leave this as false.\n\n verbose : bool\n Turn on verbose mode?\n\n output : str\n String that contains the path to the output phase-map.\n \"\"\"\n loading = [' ', '-', '\\\\', '|', '/']\n\n def __init__(self, filename, wavelength, correlation=False, output=None,\n ref=None, show=False, verbose=False):\n\n # Setting main configuration\n self.input_file = filename\n self.correlation = correlation\n self.output_file = output\n self.ref = ref\n self.show = show\n self.verbose = verbose\n self.wavelength = wavelength\n\n # ToDo: These parameters could be initialized with some value.\n self.extract_from = None\n self.free_spectral_range = None\n self.fsr_channel = None\n self.ref_x = None\n self.ref_y = None\n self.ref_s = None\n self.fwhm = None\n self.finesse = None\n self.phase_map = None\n\n # Reading raw data\n self.data = io.pyfits.getdata(filename)\n self.header = io.pyfits.getheader(filename)\n\n # Reading data-cube configuration\n self.width = self.header['NAXIS1']\n self.height = self.header['NAXIS2']\n self.depth = self.header['NAXIS3']\n\n # Reading Z calibration for plotting\n self.z = self.get_calibration()\n\n try:\n self.units = self.header['CUNIT3']\n except KeyError:\n self.units = \"channels\"\n\n try:\n self.current_sampling = self.header['C3_3']\n except KeyError:\n self.current_sampling = 1.0\n\n def run(self):\n\n # Subtract continuum\n self.data = self.subtract_continuum(self.data, show=self.show)\n\n # Get the correlation cube\n if self.correlation:\n self.extract_from = self.use_correlation()\n else:\n self.extract_from = self.input_file\n\n # Measure the free-spectral-range\n self.free_spectral_range, self.fsr_channel = \\\n self.get_free_spectral_range()\n\n # Get the center of the rings\n if self.ref is None:\n self.ref = self.find_reference_pixel()\n\n self.ref_x, self.ref_y = self.ref[:]\n\n self.ref_s = self.get_reference_spectrum(\n self.input_file, self.ref_x, self.ref_y, self.z, units=self.units,\n show=False)\n\n # # Calculate the FWHM\n self.fwhm = self.get_fwhm(self.z, self.ref_s, show=self.show)\n\n # # Calculate the finesse\n self.finesse = self.get_finesse()\n\n _log.info(\"Ideal number 
of channels: %.1f channels\"\n % round(2 * self.finesse))\n\n _log.info(\"Ideal sampling: %.1f %s / channel\" % (\n self.free_spectral_range / round(2 * self.finesse), self.units))\n\n self.phase_map = self.extract_phase_map()\n self.save()\n\n def extract_phase_map(self):\n \"\"\"\n Extract the phase-map.\n \"\"\"\n now = time.time()\n\n # Reading data\n _log.info(\"\")\n _log.info(\"Starting phase-map extraction.\")\n _log.info(\"Reading data from %s file\" % self.extract_from)\n data = io.pyfits.getdata(self.extract_from)\n\n phase_map = np.argmax(data, axis=0).astype('float64')\n phase_map -= phase_map[self.ref_y, self.ref_x]\n phase_map *= self.current_sampling\n\n _log.info(\"Done in %.2f seconds\" % (time.time() - now))\n return phase_map\n\n def find_reference_pixel(self):\n \"\"\"\n Read the reference pixel from header or find it.\n\n Returns\n -------\n ref_x (int) : X position of the center of the rings.\n ref_y (int) : Y position of the center of the rings.\n \"\"\"\n _log.info(\"\")\n _log.info(\"Finding reference pixel.\")\n\n if ('PHMREFX' in self.header) and ('PHMREFY' in self.header):\n ref_x = self.header['PHMREFX']\n ref_y = self.header['PHMREFY']\n _log.info(\"Found reference pixel found in header.\")\n _log.info(\"Using [%d, %d]\" % (ref_x, ref_y))\n\n else:\n _log.info(\"Reference pixel NOT found in header.\")\n _log.info(\"Trying to find the center of the rings.\")\n ref_x, ref_y = self.find_rings_center(self.fsr_channel)\n\n return ref_x, ref_y\n\n def find_rings_center(self, fsr_channel, n_interactions=20):\n \"\"\"\n Method used to find the center of the rings inside a FP data-cube by\n cutting it in two directions (XZ to find Y center and YZ to find X\n center), fitting a 2nd degree polynomium and get its extrema points.\n\n Parameters\n ----------\n fsr_channel (int) : free-spectral-range in number of channels\n n_interactions (int) : Number of interactions to find the center.\n\n Returns\n -------\n ref_x (int) : X position of the center of the rings.\n ref_y (int) : Y position of the center of the rings.\n \"\"\"\n now = time.time()\n depth, height, width = self.data.shape\n\n # Choosing the points\n x = (np.linspace(0.05, 0.95, 500) * width).astype(int)\n y = (np.linspace(0.05, 0.95, 500) * height).astype(int)\n\n # First guess is that the reference pixel is at the center\n ref_x = width // 2\n ref_y = height // 2\n\n # Storing reference pixels for comparison between interactions\n _log.info(\"Start center finding.\")\n old_ref_x = ref_x\n old_ref_y = ref_y\n\n # Starting interactions\n for i in range(n_interactions):\n\n # Make sure my ref pixels are within the cube\n ref_y = max(ref_y, 0)\n ref_y = min(ref_y, height - 1)\n\n ref_x = max(ref_x, 0)\n ref_x = min(ref_x, width - 1)\n\n # Get a slice\n slice_in_x = self.data[:, ref_y, x]\n slice_in_y = self.data[:, y, ref_x]\n\n # Find the peaks\n p = multiprocessing.Pool(16)\n px = PeakFinder(slice_in_x)\n py = PeakFinder(slice_in_y)\n\n peaks_x = np.array(p.map(px, range(x.size)))\n peaks_y = np.array(p.map(py, range(y.size)))\n\n # Unwrap the FSR\n peaks_x = self.unwrap_fsr(peaks_x, fsr_channel, 'Running for X')\n peaks_y = self.unwrap_fsr(peaks_y, fsr_channel, 'Running for Y')\n\n # Not that it is fixed, I can fit the parabola\n px = scipy.polyfit(x, peaks_x, 2)\n py = scipy.polyfit(y, peaks_y, 2)\n\n ref_x = int(round(- px[1] / (2.0 * px[0])))\n ref_y = int(round(- py[1] / (2.0 * py[0])))\n\n # Selecting valid data\n error_x = np.abs(peaks_x - scipy.polyval(px, x))\n error_y = np.abs(peaks_y - 
scipy.polyval(py, y))\n\n if self.show:\n plt.title(\"Finding center of the rings\")\n plt.clf()\n fig = plt.gcf()\n gs = gridspec.GridSpec(2, 1, height_ratios=[6, 2])\n\n ax1 = plt.subplot(gs[0])\n ax1.plot(x, peaks_x, 'b.', alpha=0.25)\n ax1.plot(x, scipy.polyval(px, x), 'b-', lw=2)\n ax1.axvline(ref_x, ls='--', c='blue', label='x')\n\n ax1.plot(y, peaks_y, 'r.', alpha=0.25)\n ax1.plot(y, scipy.polyval(py, y), 'r-', lw=2)\n ax1.axvline(ref_y, ls='--', c='red', label='y')\n\n ax1.legend(loc='best')\n ax1.grid()\n ax1.set_ylabel(\"Iteration number %d\" % (i + 1))\n\n ax2 = plt.subplot(gs[1], sharex=ax1)\n ax2.plot(x, peaks_x - scipy.polyval(px, x), 'o', color='b',\n alpha=0.25)\n ax2.plot(y, peaks_y - scipy.polyval(py, y), 'o', color='r',\n alpha=0.25)\n\n fig.add_axes(ax1)\n fig.add_axes(ax2)\n\n # Measuring the error\n xl = np.diff(peaks_x)\n yl = np.diff(peaks_x)\n cond_x = np.where(error_x <= 3 * np.abs(np.median(xl[xl != 0])),\n True, False)\n cond_y = np.where(error_y <= 3 * np.abs(np.median(yl[yl != 0])),\n True, False)\n\n x = x[cond_x]\n y = y[cond_y]\n\n # Choosing when to stop\n if (abs(old_ref_x - ref_x) <= 2) and (abs(old_ref_y - ref_y) <= 2):\n\n # try:\n #\n # # If the cube was binned this will be useful\n # ref_x = (ref_x - self.header['CRPIX1'] + 1) \\\n # * self.header['CDELT1'] + self.header['CRVAL1']\n #\n # # If the cube was binned this will be useful\n # ref_y = (ref_y - self.header['CRPIX2']) \\\n # * self.header['CDELT2'] + self.header['CRVAL2']\n #\n # except KeyError:\n # pass\n\n _log.info(\"Rings center found at: [%d, %d]\" % (ref_x, ref_y))\n _log.info(\"Done in %.2f s\" % (time.time() - now))\n\n if self.show:\n plt.tight_layout()\n plt.show()\n\n return ref_x, ref_y\n\n else:\n old_ref_x = ref_x\n old_ref_y = ref_y\n\n if self.show:\n plt.tight_layout()\n plt.show()\n\n # If my program gets here, it could not find the center.\n # So what?\n _log.warning(\"Maximum number of interactions reached.\")\n _log.warning(\"Current center position at [%d, %d]\" % (ref_x, ref_y))\n _log.warning(\"Do you want to use these numbers?\")\n\n reply = '.'\n while reply not in ' yn':\n reply = io.input('? ')\n if reply.lower() == 'n':\n _log.warning('Ok then. Moving forward.')\n sys.exit()\n if reply.lower() == 'y':\n _log.warning('Ok then. Moving forward.')\n return ref_x, ref_y\n\n _log.warning(\"Do you want to continue? [Y,n]\")\n reply = '.'\n while reply not in ' yn':\n reply = io.input('? ')\n if reply.lower() == 'n':\n _log.warning('Ok then. Leaving now.')\n sys.exit()\n\n _log.info(\"Then, enter the reference X in pixel:\")\n _log.info(\"Leave it empty to get it in the center of the image\")\n reply = '.'\n while not reply.isdigit():\n reply = io.input('? ')\n if reply == '':\n reply = self.header['NAXIS1'] // 2\n break\n ref_x = int(reply)\n\n _log.info(\"Then, enter the reference Y in pixels:\")\n _log.info(\"Leave it empty to get it in the center of the image\")\n reply = '.'\n while not reply.isdigit():\n reply = io.input('? 
')\n if reply == '':\n reply = self.header['NAXIS2'] // 2\n break\n ref_y = int(reply)\n\n # If the cube was binned this will be useful\n try:\n\n ref_x = (ref_x - self.header['CRPIX1']) \\\n * self.header['CDELT1'] + self.header['CRVAL1']\n\n ref_y = (ref_y - self.header['CRPIX2']) \\\n * self.header['CDELT2'] + self.header['CRVAL2']\n\n except KeyError:\n pass\n\n _log.info(\"Done in %.2f s\" % (time.time() - now))\n _log.info(\"Using [%d, %d].\" % (ref_x, ref_y))\n\n return ref_x, ref_y\n\n def get_calibration(self):\n \"\"\"\n Return an array with the current calibration.\n \"\"\"\n z = np.arange(self.depth)\n try:\n # The \"+ 1\" change from fortran like to c like indexing\n z = z - self.header['CRPIX3'] + 1\n z = z * self.header['C3_3']\n z = z + self.header['CRVAL3']\n _log.debug('CRPIX3: %.2f' % self.header['CRPIX3'])\n _log.debug('C3_3: %.2f' % self.header['C3_3'])\n _log.debug('CRVAL3: %.2f' % self.header['CRVAL3'])\n\n except KeyError:\n _log.warning(\"! Calibration in third axis not found.\")\n _log.warning(\"! I will ignore this step.\")\n\n return z\n\n def get_finesse(self):\n \"\"\"\n Assuming you have the Free-Spectral-Range in Z unit and that\n you have the FWHM in Z units as well, calculate the finesse by the\n expressions:\n\n .. math::\n F=\\\\frac{\\\\Delta z}{\\\\delta z}\n\n Returns\n -------\n finesse : float\n \"\"\"\n finesse = self.free_spectral_range / self.fwhm\n\n if self.verbose:\n _log.info(\"Finesse = %.1f\" % finesse)\n\n return finesse\n\n def get_free_spectral_range(self):\n \"\"\"\n A quick-and-dirty way to measure the free range in FP units.\n The method subtracts each frame of the data-cube from the\n first one. Then, it calculates the absolute value and collapse\n in X and Y. The FSR is where the resulting spectrum is minimum,\n excluding (of course), the first one.\n\n Returns\n -------\n fsr : float\n Free-spectral-range in BCV units.\n\n fsr_channel : int\n Free-spectral-range in channels.\n \"\"\"\n _log.info(\"Finding the free-spectral-range.\")\n\n now = time.time()\n\n # First frame is the reference frame\n ref_frame = self.data[0, :, :]\n\n # Subtract all frames from the first frame\n data = self.data - ref_frame\n\n # Get the absolute value\n data = np.abs(data)\n\n # Sum over the spatial directions\n data = data.mean(axis=2)\n data = data.mean(axis=1)\n\n # Interpolate data\n s = interpolate.interp1d(self.z, data, kind='cubic')\n z = np.linspace(self.z[5], self.z[-1], 10000)\n\n # Find the free-spectral-range in z units\n fsr = np.abs(z[np.argmin(s(z))] - self.z[0])\n _log.info('FSR = %.2f' % fsr)\n\n # Find the free-spectral-range in number of channels\n temp = self.z - z[np.argmin(s(z))]\n temp = np.abs(temp)\n fsr_c = np.argmin(temp)\n\n # Fix for python style\n fsr_c = fsr_c + 1\n _log.info('FSR channel = %d' % fsr_c)\n\n # Plot to see how it goes\n if self.show:\n plots.free_spectral_range(self.z, data, s, fsr_c)\n\n # What if my cube has less than a FSR or could not find it?\n if fsr_c == 5:\n\n _log.warning(\"FSR could not be found.\")\n _log.info(\"Do you want to continue? [Y,n]\")\n\n reply = '.'\n while reply not in ' yn':\n reply = io.input('? ')\n if reply.lower() == 'n':\n sys.exit()\n\n _log.info(\"Then, enter a FSR in Z units (usually BCV):\")\n reply = '.'\n while not reply.isdigit():\n reply = io.input('? ')\n fsr = float(reply)\n\n _log.info(\"Then, enter a FSR in number of channels:\")\n reply = '.'\n while not reply.isdigit():\n reply = io.input('? 
')\n fsr_c = int(reply)\n\n elif fsr_c == len(data):\n fsr = np.abs(self.z[0] - self.z[-1])\n _log.info(\"It seems that you scanned exactly over a FSR.\")\n _log.info(\"If not, check your data and phasemap_fit again.\")\n\n # Calculate the sampling\n sampling = fsr / fsr_c\n\n _log.info(\"FSR = %.1f %s\" % (fsr, self.units))\n _log.info(\" = %d channels\" % fsr_c)\n _log.info(\"Sampling = %.1f %s / channel\" % (sampling, self.units))\n _log.info(\"Done in %.2f s\" % (time.time() - now))\n\n return fsr, fsr_c\n\n @staticmethod\n def get_fwhm(z, s, show=False):\n \"\"\"\n Returns the full-width-at-half-maximum using different models. These\n models can be displayed.\n\n Parameters\n ----------\n z (array like) : the abscissa of the reference spectrum containing\n the bcv values for each channel or the channel itself.\n\n s (array like) : the ordinate of the reference spectrum containing\n the intensity at each channel or at each bcv value.\n\n show (bool, optional) : display plots?\n\n Returns\n -------\n fwhm (float) : the full-width-at-half-maximum in units equal to z\n (either channel or bcv).\n \"\"\"\n\n # Clear data\n p = np.percentile(s, 50.)\n s_ = s.copy()\n s_[s < p] = 0.\n\n # Find maxima avoiding the borders\n k = signal.general_gaussian(10, 1, 5)\n cc = signal.correlate(s_[5:-5], k, mode=\"same\")\n arg_maxima = np.array([np.argmax(cc)]) + 5\n\n # arg_maxima = signal.argrelmax(s_[2:-2], order=5)[0]\n _log.debug('Peaks found at: ' + np.array2string(arg_maxima,\n separator=', '))\n\n fitter = fitting.LevMarLSQFitter()\n g_fwhm, l_fwhm, gauss = [], [], 0\n\n for (i, argm) in enumerate(arg_maxima):\n\n g = models.Gaussian1D(amplitude=s_[argm], mean=z[argm], stddev=3.)\n g_fit = fitter(g, z[1:-1], s_[1:-1])\n g_fwhm.append(g_fit.stddev * 2.355)\n\n l_model = models.Lorentz1D(amplitude=s_[argm], x_0=z[argm], fwhm=3.)\n l_fit = fitter(l_model, z[1:-1], s_[1:-1])\n l_fwhm.append(l_fit.fwhm)\n \n g_rms = np.sqrt(np.mean((s - g_fit(z)) ** 2))\n l_rms = np.sqrt(np.mean((s - l_fit(z)) ** 2))\n\n _log.info(\"Peak at {:2d}\".format(argm))\n _log.info(\"gaussian fit rms: {:.2f}\".format(g_rms))\n _log.info(\"lorentzian fit rms: {:.2f}\".format(l_rms))\n\n if g_rms > l_rms:\n gauss += 1\n else:\n gauss -= 1\n\n g_fwhm = np.mean(g_fwhm)\n l_fwhm = np.mean(l_fwhm)\n _log.info(\"Gaussian fwhm: {:.2f}\".format(g_fwhm))\n _log.info(\"Lorentzian fwhm: {:.2f}\".format(l_fwhm))\n\n if gauss > 0:\n fwhm_measured = g_fwhm\n elif gauss < 0:\n fwhm_measured = l_fwhm\n else:\n fwhm_measured = (g_fwhm + l_fwhm) * 0.5\n\n if show:\n\n z_ = np.linspace(z[0], z[-1], 1000)\n fig, axs = plt.subplots(2, 1, sharex='all')\n axs[0].plot(z, s, 'ko')\n axs[0].plot(z[5:-5], cc / cc.max() * s_.max(), 'y-', label='Cross-correlation')\n axs[0].grid()\n\n if len(arg_maxima) > 0:\n\n axs[0].plot(z_, g_fit(z_), 'b-')\n axs[0].plot(z_, l_fit(z_), 'r--')\n axs[0].legend(loc='best')\n axs[0].set_ylabel('Normalized spectrum')\n\n for amax in arg_maxima:\n axs[0].axvline(z[amax], c='k', ls='--', alpha=0.5)\n\n # Display the errors\n axs[1].plot(z, s - g_fit(z), 'bx', alpha=0.5, label='Error - Gaussian Fit')\n axs[1].plot(z, s - l_fit(z), 'ro', alpha=0.5, label='Error - Lorentzian Fit')\n axs[1].set_ylabel('Fir Errors [adu]')\n axs[1].set_xlabel('z [bcv]')\n axs[1].legend(loc='best')\n axs[1].grid()\n\n plt.tight_layout()\n plt.show()\n\n return fwhm_measured\n\n @staticmethod\n def get_reference_spectrum(input_file, x, y, z, units='--', show=False):\n \"\"\"\n Get the reference spectrum.\n \"\"\"\n from scipy.stats import mode\n\n 
ref_s = io.pyfits.getdata(input_file)[:, y, x]\n ref_s /= ref_s.max() # Normalize\n ref_s -= ref_s.mean() # Remove mean to avoid triangular shape\n ref_s -= mode(ref_s)[0] # Try to put zero on zero\n\n if show:\n plt.figure()\n plt.title(\"Reference Spectrum\")\n plt.plot(z, ref_s, 'ko-', label=\"Reference spectrum\")\n plt.grid()\n plt.xlabel(\"z [%s]\" % units)\n plt.tight_layout()\n plt.show()\n\n return ref_s\n\n @staticmethod\n def unwrap_fsr(peaks, fsr_channel, running_for=None):\n \"\"\"\n Use clusters of data to identify regions that are wrapped and\n unwrap it using the fsr in number of channels.\n\n Parameters\n ----------\n peaks (numpy.ndarray) : 1D array containing the peaks with more than a\n FSR.\n fsr_channel (int) : the FSR in number of channels.\n\n Returns\n -------\n peaks (numpy.ndarray) : 1D array containing the peaks unwrapped.\n\n To Do: Test with more than 1 FSR.\n \"\"\"\n\n indexes = np.argsort(peaks)\n sorted_peaks = np.sort(peaks)\n diff_sorted_peaks = np.diff(sorted_peaks)\n temp = np.abs(diff_sorted_peaks)\n\n where = np.abs(temp - np.median(temp)) < np.std(temp)\n temp[where] = 0\n split_indexes = signal.argrelmax(temp)[0]\n\n split_y_indexes = np.split(indexes, split_indexes + 1)\n for (i, idx) in enumerate(split_y_indexes):\n peaks[idx] -= fsr_channel * i\n\n return peaks\n\n def use_correlation(self):\n \"\"\"\n Use correlation data-cube.\n \"\"\"\n _log.info(\"A correlation cube will be used.\")\n _log.info(\"Looking for an existing correlation data-cube \"\n \"in the current folder.\")\n\n candidates = glob.glob(\"*.fits\")\n\n corr_cube = None\n for candidate in candidates:\n if 'CORRFROM' in io.pyfits.getheader(candidate):\n if io.pyfits.getheader(candidate)['CORRFROM'] == self.input_file:\n _log.info(\"Correlation cube to be used: %s\" % candidate)\n return candidate\n\n if corr_cube is None:\n _log.info(\"Correlation cube not found. 
Creating a new one.\")\n data = io.pyfits.getdata(self.input_file)\n corr_cube = np.empty_like(data)\n\n x = np.arange(self.width)\n y = np.arange(self.height)\n x, y = np.meshgrid(x, y)\n x, y = np.ravel(x), np.ravel(y)\n\n for i in range(x.size):\n s = data[:, y[i], x[i]]\n s = s / s.max() # Normalize\n s = s - s.mean() # Remove mean to avoid triangular shape\n s = np.correlate(s, self.ref_s, mode='same')\n corr_cube[:, y[i], x[i]] = s\n\n temp = ((i + 1) * 100.00 / x.size)\n sys.stdout.write('\\r %2d%% ' % temp)\n sys.stdout.write(self.loading[int(temp * 10 % 5)])\n sys.stdout.flush()\n\n _log.info(\"Done.\")\n corr_name = os.path.splitext(self.input_file)[0] + '--corrcube.fits'\n _log.info(\"Saving correlation cube to %s\" % corr_name)\n\n corr_hdr = self.header.copy()\n corr_hdr.set('CORRFROM', self.input_file, 'Cube used for corrcube.')\n corr_hdr.set('', '', before='CORRFROM')\n corr_hdr.set('', '--- Correlation cube ---', before='CORRFROM')\n\n io.pyfits.writeto(corr_name, corr_cube, corr_hdr, overwrite=True)\n del corr_hdr\n del corr_cube\n\n return corr_name\n\n def save(self):\n\n # Getting the input information to work on in\n f = os.path.splitext(self.input_file)[0]\n fsr = round(self.free_spectral_range, 2)\n h = io.pyfits.Header()\n\n # Setting what is the reference pixels\n h.set('PHMREFX', value=self.ref_x, comment='Rings center - x')\n h.set('PHMREFY', value=self.ref_y, comment='Rings center - y',\n after='PHMREFX')\n\n h.add_blank('', before='PHMREFX')\n h.add_blank('--- PHM Xtractor ---', before='PHMREFX')\n\n # Store information gathered\n h.set('PHMTYPE', value='observed', comment='', after='PHMREFY')\n h.set('PHMREFF', value=self.input_file, comment='Original file',\n after='PHMTYPE')\n h.set('PHMWCAL', value=self.wavelength,\n comment='Wavelength for calibration', after='PHMREFF')\n h.set('PHM_FSR', value=fsr, comment='FSR in %s units' % self.units,\n after='PHMWCAL')\n h.set('PHMUNIT', value=self.units, comment='Units for z and FSR.',\n after='PHM_FSR')\n h.set('PHMSAMP', value=self.current_sampling,\n comment=\"Sampling per channel\", after='PHMUNIT')\n\n self.phase_map = self.phase_map - self.phase_map[\n self.ref_y, self.ref_x]\n\n try:\n del (h['CRPIX3'], h['CRVAL3'], h['C3_3'], h['CDELT3'])\n except KeyError:\n pass\n\n filename = io.safe_save(f + \"--obs_phmap.fits\", overwrite=True,\n verbose=self.verbose)\n\n _log.info(\"Saving observed phase-map to file: %s\" % filename)\n io.pyfits.writeto(filename, self.phase_map, h, overwrite=True)\n\n filename = io.safe_save(f + \"--ref_spec.fits\", overwrite=True,\n verbose=self.verbose)\n\n _log.info(\"Saving reference spectrum to file: %s\" % filename)\n io.pyfits.writeto(filename, self.ref_s, h, overwrite=True)\n\n return\n\n @staticmethod\n def subtract_continuum(data, show=False):\n\n ordered_data = np.sort(data, axis=0)\n continuum = np.median(ordered_data[:5], axis=0)\n del ordered_data\n\n data -= continuum\n return data\n\n\nclass PeakFinder:\n\n def __init__(self, data):\n\n assert data.ndim == 2\n self.data = data\n\n def __call__(self, i):\n\n data = self.data[:, i]\n data -= np.median(data)\n data = np.where(data > 0.70 * np.max(data), data, 0)\n n = int(data.shape[0] * 0.2)\n peaks = signal.argrelmax(data, axis=0, order=n)[0]\n peak = np.min(peaks)\n\n return peak\n"
] |
[
[
"numpy.split",
"scipy.polyfit",
"numpy.linspace",
"scipy.signal.correlate",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.argmin",
"numpy.mean",
"scipy.signal.general_gaussian",
"matplotlib.pyplot.tight_layout",
"numpy.arange",
"numpy.empty_like",
"matplotlib.pyplot.gcf",
"numpy.std",
"scipy.interpolate.interp1d",
"numpy.diff",
"numpy.argmax",
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.subplot",
"numpy.ravel",
"numpy.array2string",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"numpy.min",
"scipy.signal.argrelmax",
"numpy.median",
"numpy.argsort",
"matplotlib.pyplot.show",
"numpy.meshgrid",
"numpy.correlate",
"numpy.abs",
"scipy.polyval",
"matplotlib.pyplot.subplots",
"numpy.percentile",
"numpy.sort",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.grid",
"scipy.stats.mode",
"matplotlib.pyplot.xlabel"
]
] |
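A minimal sketch, assuming astropy is available, of the Gaussian FWHM estimate that `get_fwhm` performs in the phase-map extractor above; the synthetic peak, its width, and the noise level are made up for illustration.

```python
import numpy as np
from astropy.modeling import models, fitting

# Synthetic spectral peak: Gaussian with sigma = 1.5 plus a little noise.
z = np.linspace(-10, 10, 101)
s = 2.0 * np.exp(-0.5 * (z / 1.5) ** 2) + 0.01 * np.random.randn(z.size)

g_init = models.Gaussian1D(amplitude=s.max(), mean=z[np.argmax(s)], stddev=3.0)
g_fit = fitting.LevMarLSQFitter()(g_init, z, s)

fwhm = g_fit.stddev.value * 2.355   # FWHM of a Gaussian = 2*sqrt(2*ln 2)*sigma
print('FWHM ~ %.2f (true value ~ %.2f)' % (fwhm, 1.5 * 2.355))
```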
dbstein/ipde
|
[
"834e16a617f47a3eabe3307ba151d5b7db527b30"
] |
[
"examples/semi_lagrangian_experiments/unsteady_semi_experiment_curvilinear.py"
] |
[
"import numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport finufftpy\nimport time\nimport pybie2d\nfrom ipde.embedded_boundary import EmbeddedBoundary\nfrom ipde.ebdy_collection import EmbeddedBoundaryCollection, EmbeddedFunction, BoundaryFunction\nfrom ipde.heavisides import SlepianMollifier\nfrom personal_utilities.arc_length_reparametrization import arc_length_parameterize\nfrom ipde.derivatives import fd_x_4, fd_y_4, fourier\nfrom fast_interp import interp1d\nstar = pybie2d.misc.curve_descriptions.star\nGSB = pybie2d.boundaries.global_smooth_boundary.global_smooth_boundary.Global_Smooth_Boundary\nGrid = pybie2d.grid.Grid\n\n\"\"\"\nTest semi-lagrangian solve...\n\"\"\"\n\n# max time\nmax_time = 0.05\n# set timestep\ndt = 0.05\n# number of boundary points\nnb = 300\n# number of grid points\nng = int(nb/2)\n# number of chebyshev modes\nM = 8\n# padding zone\npad_zone = 3\n# smoothness of rolloff functions\nslepian_r = 2*M\n\n# generate a velocity field\nkk = 2*np.pi/3\nu_function = lambda x, y, t: np.sin(kk*x)*np.cos(kk*y)*(1+np.cos(2*np.pi*t))\nv_function = lambda x, y, t: -np.cos(kk*x)*np.sin(kk*y)*(1+np.cos(2*np.pi*t))\nc0_function = lambda x, y: np.exp(np.cos(kk*x))*np.sin(kk*y)\n\n# gradient function\ndef gradient(f):\n\tfh = np.fft.fft2(f)\n\tfx = np.fft.ifft2(fh*ikx).real\n\tfy = np.fft.ifft2(fh*iky).real\n\treturn fx, fy\n\n################################################################################\n# Get truth via just using Forward Euler on periodic domain\n\n# generate a grid\nv, h = np.linspace(-1.5, 1.5, ng, endpoint=False, retstep=True)\nx, y = np.meshgrid(v, v, indexing='ij')\n# fourier modes\nkv = np.fft.fftfreq(ng, h/(2*np.pi))\nkv[int(ng/2)] = 0.0\nkx, ky = np.meshgrid(kv, kv, indexing='ij')\nikx, iky = 1j*kx, 1j*ky\n\n# initial c field\nc0 = c0_function(x, y)\n\nprint('Testing Forward-Euler Method')\nst = time.time()\nt = 0.0\nc = c0.copy()\nwhile t < max_time-1e-10:\n\tcx, cy = gradient(c)\n\tu = u_function(x, y, t)\n\tv = v_function(x, y, t)\n\tc -= dt*(u*cx + v*cy)\n\tt += dt\n\tprint(' t = {:0.3f}'.format(t), max_time, '\\r', end='')\nc_eulerian = c.copy()\ntime_eulerian = time.time() - st\n\n################################################################################\n# Semi-Lagrangian Non-linear departure point method\n\nprint('Testing Linear Departure Method')\n\n# get heaviside function\nMOL = SlepianMollifier(slepian_r)\n\n# construct boundary and reparametrize\nbdy = GSB(c=star(nb, x=0.0, y=0.0, a=0.1, f=3))\nbdy = GSB(*arc_length_parameterize(bdy.x, bdy.y))\nbh = bdy.dt*bdy.speed.min()\n# construct embedded boundary\nebdy = EmbeddedBoundary(bdy, True, M, bh, pad_zone, MOL.step)\nebdyc = EmbeddedBoundaryCollection([ebdy,])\n# get a grid\ngrid = Grid([-1.5, 1.5], ng, [-1.5, 1.5], ng, x_endpoints=[True, False], y_endpoints=[True, False])\n# register the grid\nprint('\\nRegistering the grid')\nebdyc.register_grid(grid)\n\n# initial c field\nc0 = EmbeddedFunction(ebdyc)\nc0.define_via_function(c0_function)\n\n# now timestep\nc = c0.copy()\nt = 0\n# while t < max_time-1e-10:\n\nst = time.time()\n# get the velocity fields\nu = EmbeddedFunction(ebdyc)\nu.define_via_function(lambda x, y: u_function(x, y, t))\nv = EmbeddedFunction(ebdyc)\nv.define_via_function(lambda x, y: v_function(x, y, t))\nprint('Time to compute u: {:0.1f}'.format( (time.time()-st)*1000 ))\nst = time.time()\n\n# get the velocity fields on the boundary\nub = ebdyc.interpolate_radial_to_boundary(u)\nvb = ebdyc.interpolate_radial_to_boundary(v)\nprint('Time to 
interp u to bdy: {:0.1f}'.format( (time.time()-st)*1000 ))\nst = time.time()\n\n# first, move the boundary with the fluid velocity\nbx = ebdy.bdy.x + dt*ub.bdy_value_list[0]\nby = ebdy.bdy.y + dt*vb.bdy_value_list[0]\nprint('Time to move bdy: {:0.1f}'.format( (time.time()-st)*1000 ))\nst = time.time()\n\n# take gradients of the velocity fields\ndx = lambda f: fd_x_4(f, grid.xh, periodic_fix=True)\ndy = lambda f: fd_y_4(f, grid.yh, periodic_fix=True)\nux, uy = ebdyc.gradient2(u, dx, dy, cutoff=False)\nvx, vy = ebdyc.gradient2(v, dx, dy, cutoff=False)\nprint('Time to compute u gradients: {:0.1f}'.format( (time.time()-st)*1000 ))\nst = time.time()\n\n# now generate a new ebdy based on the moved boundary\nnew_bdy = GSB(*arc_length_parameterize(bx, by))\nbh = new_bdy.dt*new_bdy.speed.min()\n# construct embedded boundary\nnew_ebdy = EmbeddedBoundary(new_bdy, True, M, bh, pad_zone, MOL.step)\nnew_ebdyc = EmbeddedBoundaryCollection([new_ebdy,])\nnew_ebdyc.register_grid(grid)\nprint('Time to generate new ebdy and register grid: {:0.1f}'.format( (time.time()-st)*1000 ))\nst = time.time()\n\n# let's get the points that need to be interpolated to\ngp = new_ebdyc.grid_pna\nap = new_ebdyc.radial_pts\n\nfrom pybie2d.point_set import PointSet\naax = np.concatenate([gp.x, ap.x])\naay = np.concatenate([gp.y, ap.y])\naap = PointSet(x=aax, y=aay)\n\nAP_key = ebdy.register_points(aap.x, aap.y)\nprint('Time to register new points with old ebdy |A: {:0.1f}'.format( (time.time()-st)*1000 ))\nst = time.time()\n\n# register these with ebdy\ngp_key = ebdy.register_points(gp.x, gp.y)\nprint('Time to register new points with old ebdy |g: {:0.1f}'.format( (time.time()-st)*1000 ))\nst = time.time()\nap_key = ebdy.register_points(ap.x, ap.y, nearly_radial=True)\nprint('Time to register new points with old ebdy |a: {:0.1f}'.format( (time.time()-st)*1000 ))\nst = time.time()\n\n# now we need to interpolate onto things\nAEP = ebdy.registered_partitions[ap_key]\nGEP = ebdy.registered_partitions[gp_key]\n\n# generate a holding ground for the new c\nc_new = EmbeddedFunction(new_ebdyc)\nc_new.zero()\n\n# advect those in the annulus\n# category 1\nc1, c2, c3 = AEP.get_categories()\nc1n, c2n, c3n = AEP.get_category_Ns()\nuxh = ebdy.interpolate_to_points(ux, ap.x, ap.y)\nuyh = ebdy.interpolate_to_points(uy, ap.x, ap.y)\nvxh = ebdy.interpolate_to_points(vx, ap.x, ap.y)\nvyh = ebdy.interpolate_to_points(vy, ap.x, ap.y)\nuh = ebdy.interpolate_to_points(u, ap.x, ap.y)\nvh = ebdy.interpolate_to_points(v, ap.x, ap.y)\nSLM = np.zeros([c1n,] + [2,2], dtype=float)\nSLR = np.zeros([c1n,] + [2,], dtype=float)\nSLM[:,0,0] = 1 + dt*uxh[c1]\nSLM[:,0,1] = dt*uyh[c1]\nSLM[:,1,0] = dt*vxh[c1]\nSLM[:,1,1] = 1 + dt*vyh[c1]\nSLR[:,0] = dt*uh[c1]\nSLR[:,1] = dt*vh[c1]\nOUT = np.linalg.solve(SLM, SLR)\nxdt, ydt = OUT[:,0], OUT[:,1]\nxd, yd = ap.x[c1] - xdt, ap.y[c1] - ydt\n# udate c\nch = ebdy.interpolate_to_points(c, xd, yd)\nc_new.radial_value_list[0][c1.reshape(ebdy.radial_shape)] = ch\nprint('Time for annular advection cat 1: {:0.1f}'.format( (time.time()-st)*1000 ))\nst = time.time()\n# category 2\nSLM = np.zeros([c2n,] + [2,2], dtype=float)\nSLR = np.zeros([c2n,] + [2,], dtype=float)\nSLM[:,0,0] = 1 + dt*uxh[c2]\nSLM[:,0,1] = dt*uyh[c2]\nSLM[:,1,0] = dt*vxh[c2]\nSLM[:,1,1] = 1 + dt*vyh[c2]\nSLR[:,0] = dt*uh[c2]\nSLR[:,1] = dt*vh[c2]\nOUT = np.linalg.solve(SLM, SLR)\nxdt, ydt = OUT[:,0], OUT[:,1]\nxd, yd = ap.x[c2] - xdt, ap.y[c2] - ydt\n# udate c\nch = ebdy.interpolate_to_points(c, xd, 
yd)\nc_new.radial_value_list[0][c2.reshape(ebdy.radial_shape)] = ch\nprint('Time for annular advection cat 2: {:0.1f}'.format( (time.time()-st)*1000 ))\nst = time.time()\n# categroy 3... this is the tricky one\nif c3n > 0:\n\tth = 2*np.pi/nb\n\ttk = np.fft.fftfreq(nb, th/(2*np.pi))\n\tdef d1_der(f):\n\t\treturn np.fft.ifft(np.fft.fft(f)*tk*1j).real\n\tinterp = lambda f: interp1d(0, 2*np.pi, th, f, k=3, p=True)\n\tbx_interp = interp(ebdy.bdy.x)\n\tby_interp = interp(ebdy.bdy.y)\n\tbxs_interp = interp(d1_der(ebdy.bdy.x))\n\tbys_interp = interp(d1_der(ebdy.bdy.y))\n\tnx_interp = interp(ebdy.bdy.normal_x)\n\tny_interp = interp(ebdy.bdy.normal_y)\n\tnxs_interp = interp(d1_der(ebdy.bdy.normal_x))\n\tnys_interp = interp(d1_der(ebdy.bdy.normal_y))\n\tub = ebdy.interpolate_radial_to_boundary(u.radial_value_list[0])\n\tvb = ebdy.interpolate_radial_to_boundary(v.radial_value_list[0])\n\turb = ebdy.interpolate_radial_to_boundary_normal_derivative(u.radial_value_list[0])\n\tvrb = ebdy.interpolate_radial_to_boundary_normal_derivative(v.radial_value_list[0])\n\tub_interp = interp(ub)\n\tvb_interp = interp(vb)\n\turb_interp = interp(urb)\n\tvrb_interp = interp(vrb)\n\tubs_interp = interp(d1_der(ub))\n\tvbs_interp = interp(d1_der(vb))\n\turbs_interp = interp(d1_der(urb))\n\tvrbs_interp = interp(d1_der(vrb))\n\txo = new_ebdy.radial_x.ravel()[c3]\n\tyo = new_ebdy.radial_y.ravel()[c3]\n\tdef objective(s, r):\n\t\tf = np.empty([s.size, 2])\n\t\tf[:,0] = bx_interp(s) + r*nx_interp(s) + dt*ub_interp(s) + dt*r*urb_interp(s) - xo\n\t\tf[:,1] = by_interp(s) + r*ny_interp(s) + dt*vb_interp(s) + dt*r*vrb_interp(s) - yo\n\t\treturn f\n\tdef Jac(s, r):\n\t\tJ = np.empty([s.size, 2, 2])\n\t\tJ[:,0,0] = bxs_interp(s) + r*nxs_interp(s) + dt*ubs_interp(s) + dt*r*urbs_interp(s)\n\t\tJ[:,1,0] = bys_interp(s) + r*nys_interp(s) + dt*vbs_interp(s) + dt*r*vrbs_interp(s)\n\t\tJ[:,0,1] = nx_interp(s) + dt*urb_interp(s)\n\t\tJ[:,1,1] = ny_interp(s) + dt*vrb_interp(s)\n\t\treturn J\n\t# take as guess inds our s, r\n\ts = new_ebdy.radial_t.ravel()[c3]\n\tr = new_ebdy.radial_r.ravel()[c3]\n\t# now solve for sd, rd\n\tres = objective(s, r)\n\tmres = np.hypot(res[:,0], res[:,1]).max()\n\ttol = 1e-12\n\twhile mres > tol:\n\t\tJ = Jac(s, r)\n\t\td = np.linalg.solve(J, res)\n\t\ts -= d[:,0]\n\t\tr -= d[:,1]\n\t\tres = objective(s, r)\n\t\tmres = np.hypot(res[:,0], res[:,1]).max()\n\t\t# print(mres)\n\t# get the departure points\n\txd = bx_interp(s) + nx_interp(s)*r\n\tyd = by_interp(s) + ny_interp(s)*r\n\t# now get the c values\n\tch = ebdy.interpolate_to_points(c, xd, yd)\n\tc_new.radial_value_list[0][c3.reshape(ebdy.radial_shape)] = ch\n\tprint('Time for annular advection cat 3: {:0.1f}'.format( (time.time()-st)*1000 ))\n\tst = time.time()\n\n# advect those in the grid\n# category 1\nc1, c2, c3 = GEP.get_categories()\nc1n, c2n, c3n = GEP.get_category_Ns()\nuxh = ebdy.interpolate_to_points(ux, gp.x, gp.y)\nuyh = ebdy.interpolate_to_points(uy, gp.x, gp.y)\nvxh = ebdy.interpolate_to_points(vx, gp.x, gp.y)\nvyh = ebdy.interpolate_to_points(vy, gp.x, gp.y)\nuh = ebdy.interpolate_to_points(u, gp.x, gp.y)\nvh = ebdy.interpolate_to_points(v, gp.x, gp.y)\nSLM = np.zeros([c1n,] + [2,2], dtype=float)\nSLR = np.zeros([c1n,] + [2,], dtype=float)\nSLM[:,0,0] = 1 + dt*uxh[c1]\nSLM[:,0,1] = dt*uyh[c1]\nSLM[:,1,0] = dt*vxh[c1]\nSLM[:,1,1] = 1 + dt*vyh[c1]\nSLR[:,0] = dt*uh[c1]\nSLR[:,1] = dt*vh[c1]\nOUT = np.linalg.solve(SLM, SLR)\nxdt, ydt = OUT[:,0], OUT[:,1]\nxd, yd = gp.x[c1] - xdt, gp.y[c1] - ydt\n# udate c\nch = ebdy.interpolate_to_points(c, xd, 
yd)\nwork = np.empty_like(gp.x)\nwork[c1] = ch\nprint('Time for grid advection cat 1: {:0.1f}'.format( (time.time()-st)*1000 ))\nst = time.time()\n# category 2\nSLM = np.zeros([c2n,] + [2,2], dtype=float)\nSLR = np.zeros([c2n,] + [2,], dtype=float)\nSLM[:,0,0] = 1 + dt*uxh[c2]\nSLM[:,0,1] = dt*uyh[c2]\nSLM[:,1,0] = dt*vxh[c2]\nSLM[:,1,1] = 1 + dt*vyh[c2]\nSLR[:,0] = dt*uh[c2]\nSLR[:,1] = dt*vh[c2]\nOUT = np.linalg.solve(SLM, SLR)\nxdt, ydt = OUT[:,0], OUT[:,1]\nxd, yd = gp.x[c2] - xdt, gp.y[c2] - ydt\nch = ebdy.interpolate_to_points(c, xd, yd)\nwork[c2] = ch\n# set the new c values\nc_new.grid_value[new_ebdyc.phys_not_in_annulus] = work\nprint('Time for grid advection cat 2: {:0.1f}'.format( (time.time()-st)*1000 ))\nst = time.time()\n# overwrite under grid under annulus by radial grid\n_ = new_ebdyc.interpolate_radial_to_grid(c_new.radial_value_list, c_new.grid_value)\nprint('Time for grid interpolation: {:0.1f}'.format( (time.time()-st)*1000 ))\nst = time.time()\n\n# plot things relating to separation of points\nif False:\n\tfig, ax = plt.subplots()\n\tax.plot(ebdy.bdy.x, ebdy.bdy.y, color='black', linewidth=3)\n\tax.plot(ebdy.interface.x, ebdy.interface.y, color='black', linewidth=3)\n\tax.plot(new_ebdy.bdy.x, new_ebdy.bdy.y, color='blue' , linewidth=3)\n\tax.plot(new_ebdy.interface.x, new_ebdy.interface.y, color='blue' , linewidth=3)\n\tax.scatter(ap.x[AEP.category1], ap.y[AEP.category1], color='red')\n\tax.scatter(ap.x[AEP.category2], ap.y[AEP.category2], color='purple')\n\tax.scatter(ap.x[AEP.category3], ap.y[AEP.category3], color='green')\n\n\tfig, ax = plt.subplots()\n\tax.plot(ebdy.bdy.x, ebdy.bdy.y, color='black', linewidth=3)\n\tax.plot(ebdy.interface.x, ebdy.interface.y, color='black', linewidth=3)\n\tax.plot(new_ebdy.bdy.x, new_ebdy.bdy.y, color='blue' , linewidth=3)\n\tax.plot(new_ebdy.interface.x, new_ebdy.interface.y, color='blue' , linewidth=3)\n\tax.scatter(gp.x[GEP.category1], gp.y[GEP.category1], color='red')\n\tax.scatter(gp.x[GEP.category2], gp.y[GEP.category2], color='purple')\n\tax.scatter(gp.x[GEP.category3], gp.y[GEP.category3], color='green')\n\n# now reset naming conventions\nebdy = new_ebdy\nebdyc = new_ebdyc\nc = c_new\n\nt += dt\nprint(' t = {:0.3f}'.format(t), max_time, '\\r', end='')\n\n################################################################################\n# Evaluate\n\nerr = c.grid_value - c_eulerian\nerr = np.zeros_like(x)\nerr = (c.grid_value - c_eulerian)*ebdyc.phys\n\nerr = err[ebdyc.phys]\nerr = np.abs(err).max()\nprint(err)\n"
] |
[
[
"numpy.fft.fft2",
"numpy.linalg.solve",
"numpy.fft.ifft2",
"numpy.abs",
"numpy.linspace",
"numpy.fft.fft",
"numpy.empty_like",
"matplotlib.pyplot.subplots",
"numpy.cos",
"numpy.sin",
"numpy.concatenate",
"numpy.hypot",
"numpy.zeros_like",
"numpy.fft.fftfreq",
"numpy.meshgrid",
"numpy.zeros",
"numpy.empty"
]
] |
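The advection code in the row above locates semi-Lagrangian departure points with a vectorised Newton iteration over batched 2x2 systems (the `objective`/`Jac` loop solved with `np.linalg.solve`). The following is a minimal, self-contained sketch of that pattern only, using an assumed analytic velocity field u = (sin y, sin x) in place of the repository's interpolated boundary velocities; it is illustrative and not part of the dataset entry.

    import numpy as np

    def departure_points(xa, ya, dt, tol=1e-12, max_iter=50):
        """Solve xd + dt*u(xd) = xa for each arrival point (xa, ya) by Newton.

        The velocity field u = (sin y, sin x) is a stand-in for the
        interpolated velocities used in the script above.
        """
        xd, yd = xa.copy(), ya.copy()          # initial guess: arrival points
        for _ in range(max_iter):
            # residual, shape [n, 2]
            res = np.stack([xd + dt*np.sin(yd) - xa,
                            yd + dt*np.sin(xd) - ya], axis=-1)
            if np.hypot(res[:, 0], res[:, 1]).max() < tol:
                break
            # batched 2x2 Jacobians, shape [n, 2, 2]
            J = np.empty(res.shape + (2,))
            J[:, 0, 0] = 1.0
            J[:, 0, 1] = dt*np.cos(yd)
            J[:, 1, 0] = dt*np.cos(xd)
            J[:, 1, 1] = 1.0
            # solve every 2x2 system at once; the trailing axis keeps the
            # right-hand side as a stack of column vectors
            step = np.linalg.solve(J, res[..., None])[..., 0]
            xd -= step[:, 0]
            yd -= step[:, 1]
        return xd, yd

    xa, ya = np.linspace(0.0, 1.0, 5), np.linspace(0.0, 1.0, 5)
    print(departure_points(xa, ya, dt=0.1))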
FlanFlanagan/d3ploy
|
[
"076324637d66d82b91ff34110f8458e182a28dab"
] |
[
"d3ploy/DO_solvers.py"
] |
[
"\"\"\"\nThis file manages Deterministic-optimizing libaries for the D3ploy cyclus modules.\n\n\"\"\"\nimport numpy as np\nimport statsmodels.tsa.holtwinters as hw\n\n\ndef polyfit_regression(ts, back_steps=10, degree=1):\n \"\"\"\n Fits a polynomial to the entries in timeseries [ts]\n to predict the next value.\n\n Parameters:\n -----------\n ts: Array of floats\n An array of times series data to be used for the polyfit regression\n backsteps: int\n Number of backsteps to fit. (default=all past data)\n degree: int\n Degree of the fitting polynomial\n Returns:\n --------\n x : The predicted value from the fit polynomial.\n \"\"\"\n time = range(1, len(ts) + 1)\n timeseries = np.array(list(ts.values()))\n fit = np.polyfit(time[-back_steps:],\n timeseries[-back_steps:], deg=degree)\n eq = np.poly1d(fit)\n x = eq(len(ts) + 1)\n return x\n\n\ndef exp_smoothing(ts, back_steps=10, degree=1):\n \"\"\"\n Predicts next value using simple exponential smoothing.\n Parameters:\n -----------\n ts: Array of floats\n An array of times series data to be used for the polyfit regression\n Returns:\n --------\n\n x : The predicted value from the exponential smoothing method.\n\n \"\"\"\n timeseries = np.array(list(ts.values()))\n timeseries = timeseries[-back_steps:]\n if len(timeseries) == 1:\n timeseries = [np.inf,0]\n # exponential smoothing errors when there are five datapoints\n # average is appended to the beginning of the timeseries for minimal impact\n # https://github.com/statsmodels/statsmodels/issues/4878\n elif len(timeseries) == 5:\n timeseries = np.append(np.mean(timeseries), timeseries)\n\n model = hw.SimpleExpSmoothing(timeseries)\n model_fit = model.fit()\n x = model_fit.predict(len(timeseries), len(timeseries))\n return x[0]\n\n\ndef holt_winters(ts, back_steps=10, degree=1):\n \"\"\"\n Predicts next value using triple exponential smoothing\n (holt-winters method).\n Parameters:\n -----------\n ts: Array of floats\n An array of times series data to be used for the polyfit regression\n Returns:\n --------\n x : The predicted value from the holt-winters method.\n \"\"\"\n timeseries = np.array(list(ts.values()))\n timeseries = timeseries[-back_steps:]\n # exponential smoothing errors when there is only one datapoint\n if len(timeseries) == 1:\n timeseries = [np.inf,0]\n # exponential smoothing errors when there are five datapoints\n # average is appended to the beginning of the timeseries for minimal impact\n # https://github.com/statsmodels/statsmodels/issues/4878\n elif len(timeseries) == 5:\n timeseries = np.append(np.mean(timeseries), timeseries)\n model = hw.ExponentialSmoothing(timeseries)\n model_fit = model.fit()\n x = model_fit.predict(len(timeseries), len(timeseries))\n return x[0]\n\n\ndef fft(ts, back_steps=1e6, degree=1):\n timeseries = np.array(list(ts.values()))\n timeseries = timeseries[-back_steps:]\n n = timeseries.size\n n_harm = 100 # number of harmonics in model\n t = np.arange(0, n)\n if len(t) == 1: \n p = [np.inf,0]\n else:\n p = np.polyfit(t, timeseries, degree) # find linear trend in x\n x_notrend = timeseries - p[0] * t # detrended x\n x_freqdom = np.fft.fft(x_notrend) # detrended x in frequency domain\n f = np.fft.fftfreq(n) # frequencies\n indexes = list(range(n))\n # sort indexes by frequency, lower -> higher\n indexes.sort(key=lambda i: np.absolute(f[i]))\n\n t = np.arange(0, n + 1)\n restored_sig = np.zeros(t.size)\n for i in indexes[:1 + n_harm * 2]:\n ampli = np.absolute(x_freqdom[i]) / n # amplitude\n phase = np.angle(x_freqdom[i]) # phase\n restored_sig += 
ampli * np.cos(2 * np.pi * f[i] * t + phase)\n fft_fit = restored_sig + p[0] * t\n\n return fft_fit[-1]\n"
] |
[
[
"numpy.polyfit",
"numpy.poly1d",
"numpy.absolute",
"numpy.fft.fft",
"numpy.arange",
"numpy.cos",
"numpy.mean",
"numpy.fft.fftfreq",
"numpy.angle",
"numpy.zeros"
]
] |
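`DO_solvers.fft` in the row above predicts the next time-series value by removing a polynomial trend, keeping the lowest-frequency Fourier harmonics, and resynthesising one step ahead. Below is a generic sketch of that idea, not a drop-in replacement: it restores the full fitted trend with `np.polyval`, whereas the repository's version adds back only the slope term `p[0] * t`, and the synthetic input series is invented for illustration.

    import numpy as np

    def fft_extrapolate(series, n_harm=10, degree=1):
        """Predict the next value of `series`: detrend with a polynomial fit,
        then resynthesise the dominant Fourier harmonics one step ahead."""
        series = np.asarray(series, dtype=float)
        n = series.size
        t = np.arange(n)
        trend = np.polyfit(t, series, degree)            # fitted trend coefficients
        detrended = series - np.polyval(trend, t)
        spectrum = np.fft.fft(detrended)
        freqs = np.fft.fftfreq(n)
        # indices sorted from lowest to highest absolute frequency
        order = sorted(range(n), key=lambda i: np.absolute(freqs[i]))
        t_ext = np.arange(n + 1)
        restored = np.zeros(t_ext.size)
        for i in order[:1 + 2*n_harm]:
            amp = np.absolute(spectrum[i]) / n
            phase = np.angle(spectrum[i])
            restored += amp*np.cos(2*np.pi*freqs[i]*t_ext + phase)
        return (restored + np.polyval(trend, t_ext))[-1]

    ts = np.sin(np.linspace(0, 6*np.pi, 60)) + 0.05*np.arange(60)
    print(fft_extrapolate(ts))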
fschur/ODIN-with-second-order-derivatives
|
[
"6192ce3213cf5864721921d145eaf74f96e1500e"
] |
[
"odin/utils/kernels_auto.py"
] |
[
"\"\"\"\nCollection of kernel classes to be used in the Gaussian Process Regression. Compared to the standard implementation\nof the kernels, here the derivatives are not hard coded, but automatically derived using sympy.\n\nFelix Schur, ETH Zürich\n\nbased on code from\n\nGabriele Abbati, Machine Learning Research Group, University of Oxford\nFebruary 2019\n\"\"\"\n\n# Libraries\nimport tensorflow as tf\nimport numpy as np\nfrom abc import ABC\nimport sympy as sp\nimport itertools\n\n\nclass GenericKernel(ABC):\n \"\"\"\n Generic class for a Gaussian Process kernel.\n \"\"\"\n\n def __init__(self, input_dim: int, use_single_gp: bool = False, m: int = 2, sim=sp.factor):\n \"\"\"\n Constructor.\n :param input_dim: number of states.\n :param use_single_gp: boolean, indicates whether to use a single set of\n hyperparameters for all states (useful for extremely scarce data\n setting).\n :m: maximum order of derivatives to be computed\n :sim: type of simplification used for the symbolic derivatives\n \"\"\"\n self.dimensionality = tf.constant(input_dim, dtype=tf.int32)\n self._initialize_variables(use_single_gp)\n self.m = m\n self.sim = sim\n return\n\n def _initialize_variables(self, use_single_gp: bool = False) -> None:\n \"\"\"\n Initialize the hyperparameters of the kernel as TensorFlow variables.\n A logarithm-exponential transformation is used to ensure positivity\n during optimization.\n :param use_single_gp: boolean, indicates whether to use a single set of\n hyperparameters for all states (useful for extremely scarce data\n setting).\n \"\"\"\n with tf.variable_scope('gaussian_process_kernel'):\n if use_single_gp:\n self.log_lengthscale = tf.Variable(np.log(1.0),\n dtype=tf.float64,\n trainable=True,\n name='log_lengthscale')\n self.log_variance = tf.Variable(np.log(1.0),\n dtype=tf.float64,\n trainable=True,\n name='log_variance')\n self.lengthscales = \\\n tf.exp(self.log_lengthscale) \\\n * tf.ones([self.dimensionality, 1, 1], dtype=tf.float64)\n self.variances = \\\n tf.exp(self.log_variance) \\\n * tf.ones([self.dimensionality, 1, 1], dtype=tf.float64)\n else:\n self.log_lengthscales = tf.Variable(\n np.log(1.0) * tf.ones([self.dimensionality, 1, 1],\n dtype=tf.float64),\n dtype=tf.float64, trainable=True, name='log_lengthscales')\n self.log_variances = tf.Variable(\n tf.ones([self.dimensionality, 1, 1],\n dtype=tf.float64),\n dtype=tf.float64, trainable=True, name='log_variances')\n self.variances = tf.exp(self.log_variances)\n self.lengthscales = tf.exp(self.log_lengthscales)\n return\n\n def _initilize_kernel_derivatives(self, kernel_fun, x, y):\n \"\"\"\n Initializes the kernel derivatives.\n :kernel_fun: sympy function of the kernel k(x,y)\n :x: 'x' in sympy\n :y: 'y' in sympy\n \"\"\"\n self.derivatives = [[None]*(self.m+1) for _ in range(self.m+1)]\n for i, j in itertools.product(range(self.m+1), range(self.m+1)):\n if i == 0 and j == 0:\n self.derivatives[i][j] = kernel_fun\n elif j == 0:\n self.derivatives[i][j] = self.sim(sp.diff(self.derivatives[i-1][j], x))\n else:\n self.derivatives[i][j] = self.sim(sp.diff(self.derivatives[i][j-1], y))\n return\n\n def _prepare_input(self, x, y):\n \"\"\"\n Prepares the inputs to fit into the tensorflow derivative function.\n :x: 'x' the tensorflow tensor of k(x, y)\n :y: 'y' the tensorflow tensor of k(x, y)\n \"\"\"\n shape = x.shape\n x = tf.reshape(x, [1, shape[0]])\n y = tf.reshape(y, [1, shape[0]])\n l = tf.reshape(self.lengthscales, [self.lengthscales.shape[0], 1])\n v = tf.reshape(self.variances, [self.variances.shape[0], 
1])\n\n x_new = tf.repeat(x, shape[0], axis=1)\n x_new = tf.repeat(x_new, self.dimensionality, axis=0)\n\n y_new = tf.concat([y] * shape[0], axis=1)\n y_new = tf.repeat(y_new, self.dimensionality, axis=0)\n\n v_new = tf.repeat(v, shape[0] * shape[0], axis=1)\n l_new = tf.repeat(l, shape[0] * shape[0], axis=1)\n return x_new, y_new, l_new, v_new\n\n def compute_c_phi(self, xx: tf.Tensor,\n yy: tf.Tensor) -> tf.Tensor:\n \"\"\"\n To be implemented, compute the kernel covariance matrix between xx and\n yy for each state:\n c_phi[n_s, i, j] = kernel(xx[i], yy[j])_{n_s}\n The shape of the returned tensor is [n_states, n_points, n_points]\n :param xx: input tensor;\n :param yy: input tensor;\n :return: the tensor containing the covariance matrices.\n \"\"\"\n dim = xx.shape[0]\n cov_matrix = self.derivatives[0][0](*self._prepare_input(xx, yy))\n cov_matrix = tf.reshape(cov_matrix, [-1, dim, dim])\n return cov_matrix\n\n def compute_diff_c_phi(self, xx: tf.Tensor,\n yy: tf.Tensor) -> tf.Tensor:\n \"\"\"\n To be implemented, compute the derivative of the kernel covariance\n matrix between xx and yy with respect to xx, for each state:\n diff_c_phi[n_s, i, j] = d/dxx kernel(xx[i], yy[j])_{n_s}\n The shape of the returned tensor is [n_states, n_points, n_points]\n :param xx: input tensor;\n :param yy: input tensor;\n :return: the tensor containing the covariance matrices.\n \"\"\"\n dim = xx.shape[0]\n out = self.derivatives[1][0](*self._prepare_input(xx, yy))\n out = tf.reshape(out, [-1, dim, dim])\n return out\n\n def compute_c_phi_diff(self, xx: tf.Tensor,\n yy: tf.Tensor) -> tf.Tensor:\n \"\"\"\n To be implemented, compute the derivative of the kernel covariance\n matrix between xx and yy with respect to yy, for each state:\n diff_c_phi[n_s, i, j] = d/dyy kernel(xx[i], yy[j])_{n_s}\n The shape of the returned tensor is [n_states, n_points, n_points]\n :param xx: input tensor;\n :param yy: input tensor;\n :return: the tensor containing the covariance matrices.\n \"\"\"\n return - self.compute_diff_c_phi(xx, yy)\n\n def compute_diff_c_phi_diff(self, xx: tf.Tensor,\n yy: tf.Tensor) -> tf.Tensor:\n \"\"\"\n To be implemented, compute the derivative of the kernel covariance\n matrix between xx and yy with respect to xx and yy, for each state:\n diff_c_phi[n_s, i, j] = d/dxx d/dyy kernel(xx[i], yy[j])_{n_s}\n The shape of the returned tensor is [n_states, n_points, n_points]\n :param xx: input tensor;\n :param yy: input tensor;\n :return: the tensor containing the covariance matrices.\n \"\"\"\n dim = xx.shape[0]\n out = self.derivatives[1][1](*self._prepare_input(xx, yy))\n out = tf.reshape(out, [-1, dim, dim])\n return out\n\n def compute_diff_diff_c_phi(self, xx: tf.Tensor,\n yy: tf.Tensor) -> tf.Tensor:\n \"\"\"\n To be implemented, compute the derivative of the kernel covariance\n matrix between xx and yy with respect to xx twice,\n for each state:\n diff_c_phi[n_s, i, j] =\n d^2/dxx^2 kernel(xx[i], yy[j])_{n_s}\n The shape of the returned tensor is [n_states, n_points, n_points]\n :param xx: input tensor;\n :param yy: input tensor;\n :return: the tensor containing the covariance matrices.\n \"\"\"\n dim = xx.shape[0]\n out = self.derivatives[2][0](*self._prepare_input(xx, yy))\n out = tf.reshape(out, [-1, dim, dim])\n return out\n\n def compute_c_phi_diff_diff(self, xx: tf.Tensor,\n yy: tf.Tensor) -> tf.Tensor:\n \"\"\"\n To be implemented, compute the derivative of the kernel covariance\n matrix between xx and yy with respect to y twice, for each state:\n diff_c_phi[n_s, i, j] = d^2/dyy^2 
kernel(xx[i], yy[j])_{n_s}\n The shape of the returned tensor is [n_states, n_points, n_points]\n :param xx: input tensor;\n :param yy: input tensor;\n :return: the tensor containing the covariance matrices.\n \"\"\"\n\n return self.compute_diff_diff_c_phi(xx, yy)\n\n def compute_diff_diff_c_phi_diff(self, xx: tf.Tensor,\n yy: tf.Tensor) -> tf.Tensor:\n \"\"\"\n To be implemented, compute the derivative of the kernel covariance\n matrix between xx and yy with respect to twice xx and y, for each state:\n diff_c_phi[n_s, i, j] = d^2/dxx^2 d/dyy kernel(xx[i], yy[j])_{n_s}\n The shape of the returned tensor is [n_states, n_points, n_points]\n :param xx: input tensor;\n :param yy: input tensor;\n :return: the tensor containing the covariance matrices.\n \"\"\"\n dim = xx.shape[0]\n out = self.derivatives[2][1](*self._prepare_input(xx, yy))\n out = tf.reshape(out, [-1, dim, dim])\n return out\n\n def compute_diff_c_phi_diff_diff(self, xx: tf.Tensor,\n yy: tf.Tensor) -> tf.Tensor:\n \"\"\"\n To be implemented, compute the derivative of the kernel covariance\n matrix between xx and yy with respect to xx and twice to yy,\n for each state:\n diff_c_phi[n_s, i, j] =\n d/dxx d^2/dyy^2 kernel(xx[i], yy[j])_{n_s}\n The shape of the returned tensor is [n_states, n_points, n_points]\n :param xx: input tensor;\n :param yy: input tensor;\n :return: the tensor containing the covariance matrices.\n \"\"\"\n return - self.compute_diff_diff_c_phi_diff(xx, yy)\n\n def compute_diff_diff_c_phi_diff_diff(self, xx: tf.Tensor,\n yy: tf.Tensor) -> tf.Tensor:\n \"\"\"\n To be implemented, compute the derivative of the kernel covariance\n matrix between xx and yy with respect to xx twice and twice to yy,\n for each state:\n diff_c_phi[n_s, i, j] =\n d^2/dxx^2 d^2/dyy^2 kernel(xx[i], yy[j])_{n_s}\n The shape of the returned tensor is [n_states, n_points, n_points]\n :param xx: input tensor;\n :param yy: input tensor;\n :return: the tensor containing the covariance matrices.\n \"\"\"\n dim = xx.shape[0]\n out = self.derivatives[2][2](*self._prepare_input(xx, yy))\n out = tf.reshape(out, [-1, dim, dim])\n return out\n\n\nclass RBFKernel(GenericKernel):\n \"\"\"\n Implementation of the Radial Basis Function kernel.\n \"\"\"\n def __init__(self, input_dim: int, use_single_gp: bool = False, m: int = 2):\n super(RBFKernel, self).__init__(input_dim, use_single_gp, m)\n\n self.x, self.y, self.l, self.v = sp.symbols('x y l v', real=True)\n function = self.v * sp.exp(-(self.x-self.y)**2.0 / 2.0 / self.l**2.0)\n self._initilize_kernel_derivatives(function, self.x, self.y)\n\n for i, j in itertools.product(range(self.m+1), range(self.m+1)):\n self.derivatives[i][j] = sp.lambdify([self.x, self.y, self.l, self.v], self.derivatives[i][j], \"tensorflow\")\n\n return\n\n\ndef tf_dirac_delta(inp, der=\"not_given\"):\n return tf.zeros(inp.shape, dtype=tf.float64)\n\n\ndef tf_sign(inp):\n \"\"\"\n Custom translation function of the sign-function for sympy lamdify.\n \"\"\"\n cond = tf.less_equal(tf.abs(inp), 1e-10)\n out = tf.where(cond, tf.ones(shape=inp.shape, dtype=tf.float64), tf.cast(tf.sign(inp), dtype=tf.float64))\n return out\n\n\nclass Matern52Kernel(GenericKernel):\n \"\"\"\n Implementation of the Matern 5/2 kernel.\n \"\"\"\n def __init__(self, input_dim: int, use_single_gp: bool = False, m: int = 2):\n super(Matern52Kernel, self).__init__(input_dim, use_single_gp, m)\n self.x, self.y, self.l, self.v = sp.symbols('x y l v', real=True)\n function = self.v * (1.0 + sp.sqrt(5.0) / self.l * abs(self.x - self.y) + 5.0 / 3.0 / 
self.l**2 *\n (self.x - self.y)**2) * sp.exp(-sp.sqrt(5.0) / self.l * abs(self.x - self.y))\n self._initilize_kernel_derivatives(function, self.x, self.y)\n\n for i, j in itertools.product(range(self.m+1), range(self.m+1)):\n self.derivatives[i][j] = sp.lambdify([self.x, self.y, self.l, self.v], self.derivatives[i][j],\n modules=['tensorflow', {'DiracDelta': tf_dirac_delta,\n 'sign': tf_sign}])\n\n\nclass Matern32Kernel(GenericKernel):\n \"\"\"\n Implementation of the Matern 3/2 kernel.\n \"\"\"\n def __init__(self, input_dim: int, use_single_gp: bool = False, m: int = 1):\n super(Matern32Kernel, self).__init__(input_dim, use_single_gp, m)\n self.x, self.y, self.l, self.v = sp.symbols('x y l v', real=True)\n function = self.v * (1.0 + sp.sqrt(3.0) / self.l * abs(self.x - self.y)) * sp.exp(-sp.sqrt(3.0) / self.l *\n abs(self.x - self.y))\n self._initilize_kernel_derivatives(function, self.x, self.y)\n\n for i, j in itertools.product(range(self.m+1), range(self.m+1)):\n self.derivatives[i][j] = sp.lambdify([self.x, self.y, self.l, self.v], self.derivatives[i][j],\n modules=['tensorflow', {'DiracDelta': tf_dirac_delta,\n 'sign': tf_sign}])\n\n\nclass RationalQuadraticKernel(GenericKernel):\n \"\"\"\n Implementation of the Rational-Quadratic kernel.\n \"\"\"\n def __init__(self, input_dim: int, use_single_gp: bool = False,\n alpha: float = 1.0, m: int = 2):\n super(RationalQuadraticKernel,\n self).__init__(input_dim, use_single_gp, sim=sp.simplify, m=m)\n self.alpha = tf.constant(alpha, dtype=tf.float64)\n self.x, self.y, self.l, self.v, self.a = sp.symbols('x y l v a', real=True)\n function = self.v*(1 + (self.x - self.y)**2 / (2 * self.a * self.l**2))**(-self.a)\n self._initilize_kernel_derivatives(function, self.x, self.y)\n\n for i, j in itertools.product(range(self.m + 1), range(self.m + 1)):\n self.derivatives[i][j] = sp.lambdify([self.x, self.y, self.l, self.v, self.a], self.derivatives[i][j],\n \"tensorflow\")\n\n return\n\n def _prepare_input(self, x, y):\n \"\"\"\n We need to overwrite the prepare input function here, since RQK has different variables.\n \"\"\"\n shape = x.shape\n x = tf.reshape(x, [1, shape[0]])\n y = tf.reshape(y, [1, shape[0]])\n l = tf.reshape(self.lengthscales, [self.lengthscales.shape[0], 1])\n v = tf.reshape(self.variances, [self.variances.shape[0], 1])\n\n x_new = tf.repeat(x, shape[0], axis=1)\n x_new = tf.repeat(x_new, self.dimensionality, axis=0)\n\n y_new = tf.concat([y] * shape[0], axis=1)\n y_new = tf.repeat(y_new, self.dimensionality, axis=0)\n\n v_new = tf.repeat(v, shape[0] * shape[0], axis=1)\n l_new = tf.repeat(l, shape[0] * shape[0], axis=1)\n\n alpha_new = tf.reshape(self.alpha, [1, 1])\n alpha_new = tf.repeat(alpha_new, shape[0] * shape[0], axis=1)\n alpha_new = tf.repeat(alpha_new, self.dimensionality, axis=0)\n\n return x_new, y_new, l_new, v_new, alpha_new\n\n\nclass SigmoidKernel(GenericKernel):\n \"\"\"\n Implementation of the Sigmoid kernel.\n \"\"\"\n def __init__(self, input_dim: int, use_single_gp: bool = False, m: int = 2):\n super(SigmoidKernel,\n self).__init__(input_dim, use_single_gp, m)\n self.x, self.y, self.v, self.a_sp, self.b_sp = sp.symbols('x y v a b', real=True)\n function = self.v * sp.asin((self.a_sp + self.b_sp * self.x * self.y) /\n sp.sqrt((self.a_sp + self.b_sp * self.x**2 + 1) *\n (self.a_sp + self.b_sp * self.y**2 + 1)))\n self._initilize_kernel_derivatives(function, self.x, self.y)\n\n for i, j in itertools.product(range(self.m + 1), range(self.m + 1)):\n self.derivatives[i][j] = sp.lambdify([self.x, self.y, 
self.v, self.a_sp, self.b_sp],\n self.derivatives[i][j], \"tensorflow\")\n\n return\n\n def _prepare_input(self, x, y):\n \"\"\"\n We need to overwrite the prepare input function here, since Sigmoid Kernel has different variables.\n \"\"\"\n shape = x.shape\n x = tf.reshape(x, [1, shape[0]])\n y = tf.reshape(y, [1, shape[0]])\n v = tf.reshape(self.variances, [self.variances.shape[0], 1])\n a = tf.reshape(self.a, [self.a.shape[0], 1])\n b = tf.reshape(self.b, [self.b.shape[0], 1])\n\n x_new = tf.repeat(x, shape[0], axis=1)\n x_new = tf.repeat(x_new, self.dimensionality, axis=0)\n\n y_new = tf.concat([y] * shape[0], axis=1)\n y_new = tf.repeat(y_new, self.dimensionality, axis=0)\n\n v_new = tf.repeat(v, shape[0] * shape[0], axis=1)\n a_new = tf.repeat(a, shape[0] * shape[0], axis=1)\n b_new = tf.repeat(b, shape[0] * shape[0], axis=1)\n\n return x_new, y_new, v_new, a_new, b_new\n\n def _initialize_variables(self, use_single_gp: bool = False) -> None:\n \"\"\"\n We need to overwrite the prepare input function here, since the Sigmoid kernel has different variables.\n \"\"\"\n with tf.variable_scope('gaussian_process_kernel'):\n if use_single_gp:\n self.log_a_single = tf.Variable(np.log(1.0), dtype=tf.float64,\n trainable=True,\n name='sigmoid_a')\n self.log_b_single = tf.Variable(np.log(1.0), dtype=tf.float64,\n trainable=True,\n name='sigmoid_b')\n self.log_variance = tf.Variable(np.log(1.0), dtype=tf.float64,\n trainable=True,\n name='log_variance')\n self.a = \\\n tf.exp(self.log_a_single) \\\n * tf.ones([self.dimensionality, 1, 1], dtype=tf.float64)\n self.b = \\\n tf.exp(self.log_b_single) \\\n * tf.ones([self.dimensionality, 1, 1], dtype=tf.float64)\n self.variances = \\\n tf.exp(self.log_variance) \\\n * tf.ones([self.dimensionality, 1, 1], dtype=tf.float64)\n else:\n self.log_a = tf.Variable(np.log(1.0) *\n tf.ones([self.dimensionality, 1, 1],\n dtype=tf.float64),\n dtype=tf.float64,\n trainable=True,\n name='sigmoid_a')\n self.log_b = tf.Variable(np.log(1.0) *\n tf.ones([self.dimensionality, 1, 1],\n dtype=tf.float64),\n dtype=tf.float64,\n trainable=True,\n name='sigmoid_b')\n self.log_variances = tf.Variable(\n np.log(1.0) * tf.ones([self.dimensionality, 1, 1],\n dtype=tf.float64),\n dtype=tf.float64, trainable=True, name='variances')\n self.a = tf.exp(self.log_a)\n self.b = tf.exp(self.log_b)\n self.variances = tf.exp(self.log_variances)\n return\n\n def compute_c_phi_diff(self, xx: tf.Tensor,\n yy: tf.Tensor) -> tf.Tensor:\n \"\"\"\n Need to redefine because Sigmoid kernel is not symmetric.\n \"\"\"\n dim = xx.shape[0]\n out = self.derivatives[0][1](*self._prepare_input(xx, yy))\n out = tf.reshape(out, [-1, dim, dim])\n return out\n\n def compute_c_phi_diff_diff(self, xx: tf.Tensor,\n yy: tf.Tensor) -> tf.Tensor:\n \"\"\"\n Need to redefine because Sigmoid kernel is not symmetric.\n \"\"\"\n dim = xx.shape[0]\n out = self.derivatives[0][2](*self._prepare_input(xx, yy))\n out = tf.reshape(out, [-1, dim, dim])\n return out\n\n def compute_diff_c_phi_diff_diff(self, xx: tf.Tensor,\n yy: tf.Tensor) -> tf.Tensor:\n \"\"\"\n Need to redefine because Sigmoid kernel is not symmetric.\n \"\"\"\n dim = xx.shape[0]\n out = self.derivatives[1][2](*self._prepare_input(xx, yy))\n out = tf.reshape(out, [-1, dim, dim])\n return out\n\n"
] |
[
[
"numpy.log",
"tensorflow.sign",
"tensorflow.concat",
"tensorflow.constant",
"tensorflow.zeros",
"tensorflow.reshape",
"tensorflow.ones",
"tensorflow.repeat",
"tensorflow.exp",
"tensorflow.variable_scope",
"tensorflow.abs"
]
] |
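The kernel classes above derive k(x, y) and its derivatives symbolically with `sympy.diff` and then lambdify the results. The sketch below reproduces that derivative table for the RBF kernel but lambdifies to NumPy rather than TensorFlow 1.x, purely to keep the example dependency-light; the lengthscale and variance values are arbitrary.

    import itertools
    import numpy as np
    import sympy as sp

    # Symbolic RBF kernel k(x, y) and its derivatives up to order (m, m),
    # mirroring the _initilize_kernel_derivatives pattern above.
    x, y, l, v = sp.symbols('x y l v', real=True)
    kernel = v*sp.exp(-(x - y)**2/(2*l**2))

    m = 2
    derivs = [[None]*(m + 1) for _ in range(m + 1)]
    for i, j in itertools.product(range(m + 1), range(m + 1)):
        if i == 0 and j == 0:
            derivs[i][j] = kernel
        elif j == 0:
            derivs[i][j] = sp.factor(sp.diff(derivs[i - 1][j], x))   # d/dx
        else:
            derivs[i][j] = sp.factor(sp.diff(derivs[i][j - 1], y))   # d/dy

    # Lambdify to NumPy (the repository lambdifies to TensorFlow instead).
    dk_dx = sp.lambdify([x, y, l, v], derivs[1][0], 'numpy')
    d2k_dxdy = sp.lambdify([x, y, l, v], derivs[1][1], 'numpy')

    xs = np.linspace(0.0, 1.0, 4)
    print(dk_dx(xs[:, None], xs[None, :], 1.0, 1.0))
    print(d2k_dxdy(xs[:, None], xs[None, :], 1.0, 1.0))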
aida-ugent/FIPR
|
[
"723b3330fd95542803bf72184411b3fcfa48c168"
] |
[
"src/predictors/maxent_predictor.py"
] |
[
"import numpy as np\nimport pandas as pd\nimport torch\nimport torch.utils.data as torch_data\nfrom tqdm import tqdm\n\nfrom fip.fairness_loss import FairnessLoss\nfrom .predictor import Predictor\nfrom utils.adjacency_data import build_adjacency_matrix, AdjacencySampler, map_edges\n\n\nclass MaxEntPredictor(Predictor):\n def __init__(self,\n fip_strength=0,\n fip_type='',\n nb_epochs=100,\n learning_rate=None,\n batch_size=None,\n **kwargs):\n super().__init__(**kwargs)\n\n self.fip_strength = fip_strength\n self.fip_type = fip_type\n self.nb_epochs = nb_epochs\n self.learning_rate = learning_rate\n self.batch_size = batch_size\n\n self._lambdas = None\n\n def fit(self, train_data, attributes, **kwargs):\n if self.batch_size is not None:\n raise ValueError(\"batch_size was not None, but batching does not work properly for fip loss in LBFGS!\")\n\n try:\n partition_mask = attributes['partition']\n attributes = attributes.drop(columns=['partition'])\n except KeyError:\n partition_mask = None\n\n # Use the predictor's parameters as parameters for the lambdas.\n lambdas_kwargs = self.get_params()\n\n if partition_mask is not None:\n self._lambdas = LambdasPerPartPair(partition_mask=partition_mask, **lambdas_kwargs)\n else:\n self._lambdas = Lambdas(**lambdas_kwargs)\n self._lambdas.fit(train_data, attributes)\n\n def get_embeddings(self, ids):\n return np.ones((ids.shape[0], 1), dtype=np.float)\n\n def predict(self, edges):\n return self._lambdas(edges).detach().numpy()\n\n def predict_logits(self, edges):\n return self._lambdas(edges, as_logits=True)\n\n\nclass Lambdas(torch.nn.Module):\n def __init__(self,\n fip_strength=1,\n fip_type='',\n nb_epochs=100,\n learning_rate=1e-2,\n batch_size=10 ** 7\n ):\n super().__init__()\n\n self.fip_strength = fip_strength\n self.fip_type = fip_type\n self.nb_epochs = nb_epochs\n self.learning_rate = learning_rate\n self.batch_size = batch_size\n\n self._id_to_idx = None\n self._F_matrix = None\n self._la = None\n\n def fit(self, train_edges, attributes):\n self._id_to_idx = pd.Series(index=attributes.index, data=np.arange(len(attributes)))\n train_edges = map_edges(train_edges, self._id_to_idx)\n A = build_adjacency_matrix(train_edges, nb_nodes=len(attributes))\n A_sampler = AdjacencySampler(A, batch_nb_rows=None)\n data_loader = torch_data.DataLoader(dataset=A_sampler, num_workers=0, batch_size=None)\n\n # Construct FeatureMatrix object that returns a sparse tensor of features for the datapoints.\n nb_rows, nb_cols = A.shape\n self._F_matrix = FeatureMatrix(nb_rows=nb_rows, nb_cols=nb_cols)\n nb_features = self._F_matrix.get_nb_features()\n\n self._la = torch.nn.Linear(in_features=nb_features, out_features=1, bias=False).float()\n\n optimizer = torch.optim.LBFGS(self._la.parameters(), lr=1, max_iter=self.nb_epochs, history_size=50,\n line_search_fn='strong_wolfe')\n # optimizer = torch.optim.Adam(self._la.parameters(), lr=1e-3)\n pred_loss_f = torch.nn.BCEWithLogitsLoss()\n\n if self.fip_strength > 0:\n fairness_loss_f = FairnessLoss(self.fip_type)\n\n print(\"Learning MaxEnt distribution...\")\n with tqdm() as pbar:\n # Important: I could not get LBFGS to work properly with minibatching. Therefore, I aggregate the losses\n # of the entire dataset before backpropagation. Note that if the length of the dataset is not a multiple of\n # the batch size, our last batch will be smaller. 
This is accounted for in this implementation.\n def closure():\n if torch.is_grad_enabled():\n optimizer.zero_grad()\n bce_loss = torch.zeros(1, dtype=torch.float)\n fairness_loss = torch.zeros(1, dtype=torch.float)\n for batch_data in data_loader:\n edges, labels = batch_data\n logits = self(edges, as_logits=True, idx_in_known_format=True)\n bce_loss += pred_loss_f(logits, labels.float()) * (edges.shape[0] / len(data_loader))\n\n if self.fip_strength > 0:\n fairness_loss += fairness_loss_f(torch.sigmoid(logits), edges, labels, attributes) \\\n * (edges.shape[0] / len(data_loader))\n\n total_loss = (bce_loss + self.fip_strength * fairness_loss)\n if total_loss.requires_grad:\n total_loss.backward()\n loss_val = total_loss.item()\n\n progress_text = f\"BCE loss: {bce_loss.item():.8f}\"\n if self.fip_strength > 0:\n progress_text += f\", fip loss: {fairness_loss.item() * self.fip_strength:.8f}\"\n pbar.set_description(progress_text)\n pbar.update()\n\n self._last_loss_vals = bce_loss.item(), self.fip_strength * fairness_loss.item()\n return loss_val\n\n # LBFGS already finds the `optimal' parameters, so there should only be one update.\n optimizer.step(closure)\n\n def forward(self, edges, as_logits=False, idx_in_known_format=False):\n if self._la is None:\n raise ValueError(\"MaxEnt lambdas not fitted!\")\n if not idx_in_known_format:\n if isinstance(edges, torch.Tensor):\n edges = edges.numpy()\n edges = torch.from_numpy(map_edges(edges, self._id_to_idx))\n\n features = self._F_matrix.features_of_row_col(edges)\n lambdas_sum = torch.squeeze(self._la(features))\n if as_logits:\n return lambdas_sum\n\n probs = torch.sigmoid(lambdas_sum)\n return probs\n\n\nclass ZerosLambdas(Lambdas):\n def fit(self, train_edges, attributes):\n pass\n\n def forward(self, edges, as_logits=False, idx_in_known_format=False):\n if as_logits:\n return torch.ones(edges.shape[0]).float() * (-np.inf)\n return torch.zeros(edges.shape[0]).float()\n\n\nclass LambdasPerPartPair(torch.nn.Module):\n def __init__(self,\n partition_mask,\n undirected=True,\n **kwargs):\n super().__init__()\n\n self._undirected = undirected\n self._lambdas_params = kwargs\n\n partitions = np.unique(partition_mask)\n nb_partitions = partitions.shape[0]\n if not np.all(partitions == np.arange(nb_partitions)):\n raise ValueError(\"Block mask did not contain 0-indexed ordinal values!\")\n self._partition_mask = partition_mask\n self._lambdas_per_partpair = np.empty((nb_partitions, nb_partitions), dtype=np.object)\n\n def fit(self, train_edges, attributes):\n partpairs = map_edges(train_edges, self._partition_mask)\n nb_partitions = self._lambdas_per_partpair.shape[0]\n for src_part in range(nb_partitions):\n if self._undirected:\n first_dst_part = src_part\n else:\n first_dst_part = 0\n\n src_part_matches = partpairs[:, 0] == src_part\n for dst_part in range(first_dst_part, nb_partitions):\n dst_part_matches = partpairs[:, 1] == dst_part\n partpair_match = np.logical_and(src_part_matches, dst_part_matches)\n partpair_edges = train_edges[partpair_match]\n\n if partpair_edges.shape[0] > 0:\n partpair_lambdas = Lambdas(**self._lambdas_params)\n partpair_attrs = attributes[self._partition_mask.isin([src_part, dst_part])]\n partpair_lambdas.fit(partpair_edges, partpair_attrs)\n else:\n partpair_lambdas = ZerosLambdas()\n self._lambdas_per_partpair[src_part, dst_part] = partpair_lambdas\n\n def forward(self, edges, as_logits=False):\n if not isinstance(self._lambdas_per_partpair[0, 0], Lambdas):\n raise ValueError(\"MaxEnt lambdas not fitted!\")\n if 
isinstance(edges, torch.Tensor):\n edges = edges.numpy()\n\n partpairs = map_edges(edges, self._partition_mask)\n uniq_partpairs = np.unique(partpairs, axis=0)\n\n predictions = torch.empty(edges.shape[0], dtype=torch.float)\n for partpair in uniq_partpairs:\n partpair_match = (partpairs == partpair).all(axis=1)\n partpair_edges = edges[partpair_match]\n\n # If needed, use the symmetrical lambdas.\n src_part, dst_part = partpair[0], partpair[1]\n if self._undirected and src_part > dst_part:\n src_part, dst_part = dst_part, src_part\n partpair_edges[:, [1, 0]] = partpair_edges[:, [0, 1]]\n\n partpair_lambdas = self._lambdas_per_partpair[src_part, dst_part]\n predictions[partpair_match] = partpair_lambdas(partpair_edges, as_logits=as_logits)\n return predictions\n\n\nclass FeatureMatrix:\n def __init__(self,\n nb_rows=None,\n nb_cols=None):\n self._features = []\n if nb_rows is not None:\n self._features.append(EdgeSource(dim=nb_rows))\n if nb_cols is not None:\n self._features.append(EdgeDest(dim=nb_cols))\n\n # Precompute the number of features.\n self._nb_features = 0\n for feature in self._features:\n self._nb_features += feature.nb_features()\n\n def features_of_row_col(self, edges):\n nb_fm_rows = edges.shape[0]\n\n all_fm_row_coords = []\n all_fm_col_coords = []\n all_fm_data = []\n\n feature_pointer = 0\n for feature in self._features:\n fm_row_coords, fm_col_coords, fm_data = feature.feature_matrix(feature_pointer, edges)\n all_fm_row_coords.append(fm_row_coords)\n all_fm_col_coords.append(fm_col_coords)\n all_fm_data.append(fm_data)\n feature_pointer += feature.nb_features()\n\n all_fm_row_coords = np.concatenate(all_fm_row_coords)\n all_fm_col_coords = np.concatenate(all_fm_col_coords)\n all_fm_data = np.concatenate(all_fm_data)\n\n indices = torch.from_numpy(np.vstack((all_fm_row_coords, all_fm_col_coords)))\n values = torch.from_numpy(all_fm_data)\n feature_matrix = torch.sparse_coo_tensor(indices, values, size=(nb_fm_rows, self._nb_features),\n dtype=torch.float)\n return feature_matrix\n\n def get_nb_features(self):\n return self._nb_features\n\n\nclass Feature:\n def __init__(self, dim=None):\n self._dim = dim\n\n def feature_matrix(self, feature_pointer, edges):\n raise NotImplementedError\n\n def nb_features(self):\n raise NotImplementedError\n\n\n# Corresponds with row constraint.\nclass EdgeSource(Feature):\n def feature_matrix(self, feature_pointer, edges):\n nb_fm_rows = edges.shape[0]\n fm_row_coords = np.arange(nb_fm_rows, dtype=np.int)\n fm_col_coords = edges[:, 0] + feature_pointer\n fm_data = np.ones(nb_fm_rows, dtype=np.float)\n return fm_row_coords, fm_col_coords, fm_data\n\n def nb_features(self):\n return self._dim\n\n\n# Corresponds with col constraint.\nclass EdgeDest(Feature):\n def feature_matrix(self, feature_pointer, edges):\n nb_fm_rows = edges.shape[0]\n fm_row_coords = np.arange(nb_fm_rows, dtype=np.int)\n fm_col_coords = edges[:, 1] + feature_pointer\n fm_data = np.ones(nb_fm_rows, dtype=np.float)\n return fm_row_coords, fm_col_coords, fm_data\n\n def nb_features(self):\n return self._dim\n"
] |
[
[
"torch.sigmoid",
"torch.ones",
"torch.empty",
"numpy.unique",
"torch.zeros",
"numpy.arange",
"numpy.vstack",
"torch.utils.data.DataLoader",
"torch.from_numpy",
"numpy.ones",
"numpy.concatenate",
"torch.sparse_coo_tensor",
"torch.nn.BCEWithLogitsLoss",
"torch.nn.Linear",
"torch.is_grad_enabled",
"numpy.logical_and",
"numpy.empty"
]
] |
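`Lambdas.fit` above optimises all parameters in a single full-batch LBFGS call whose closure aggregates the BCE (plus optional fairness) loss before backpropagating. The snippet below shows only that optimiser/closure pattern on a synthetic logistic-regression problem; the data, model size, and omission of the fairness term are assumptions made for brevity.

    import torch

    # Tiny full-batch LBFGS-with-closure pattern, the same optimisation loop
    # structure used by Lambdas.fit above (synthetic data, no fairness term).
    torch.manual_seed(0)
    X = torch.randn(200, 5)
    y = (X[:, 0] + 0.5*X[:, 1] > 0).float()

    linear = torch.nn.Linear(5, 1, bias=False)
    loss_fn = torch.nn.BCEWithLogitsLoss()
    optimizer = torch.optim.LBFGS(linear.parameters(), lr=1,
                                  max_iter=100, line_search_fn='strong_wolfe')

    def closure():
        if torch.is_grad_enabled():
            optimizer.zero_grad()
        logits = linear(X).squeeze(-1)
        loss = loss_fn(logits, y)
        if loss.requires_grad:
            loss.backward()
        return loss

    # LBFGS runs its full inner iteration inside a single step() call.
    optimizer.step(closure)
    print(float(closure()))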
oraschewski/CommunityFirnModel
|
[
"3041cd604d9a5c7eed7cbfd676934d970417818c"
] |
[
"CFM_main/reader.py"
] |
[
"#!usr/bin/env python\n'''\nFunctions to read model inputs.\n'''\n\nimport os\nimport numpy as np\n# from string import join\nfrom constants import *\nimport h5py\n\ndef read_input(filename,StartDate=None):\n '''\n Read in data from csv input files\n\n :param filename: name of the file which holds the accumulation rate data\n\n :return input_data: vector of field of interest (e.g. temperature, accumulation rate from a specified csv file\n :return input_year: corresponding time vector (in years)\n '''\n\n spot = os.getcwd()\n\n FID = os.path.join(spot, filename)\n data = np.loadtxt(FID, delimiter=',') #changed 3/6/17 to loadtxt from genfromtxt; much faster\n xx,yy = np.shape(data)\n n_data = np.minimum(xx, yy)\n if n_data==2:\n if xx>yy:\n input_year = data[:, 0]\n input_data = data[:, 1]\n else:\n input_year = data[0, :]\n input_data = data[1, :]\n else:\n if xx>yy:\n input_year = data[:, 0]\n input_data = np.transpose(data[:, 1:n_data])\n else:\n input_year = data[0, :]\n input_data = data[1:n_data, :]\n\n if StartDate==None:\n pass\n else:\n StartInd = np.where(input_year>=StartDate)[0]\n input_year = input_year[StartInd]\n input_data = input_data[StartInd]\n\n return input_data, input_year\n\ndef read_init(folder, resultsFileName, varname):\n\n '''\n Read in data for initial depth, age, density, and temperature to run the model without spinup\n\n :param folder: the folder containing the files holding depth, age, density, and temperature\n\n '''\n f5 = h5py.File(os.path.join(folder, resultsFileName),'r')\n init_value = f5[varname][:]\n f5.close()\n\n return init_value\n\n\n# def read_snowmelt(file):\n# '''\n# Read in data for initial melt rates\n\n# :param file: name of the file which holds the accumulation rate data\n\n# :return input_bdot: accumulation rate vector from a specified csv file\n# :return input_year_bdot: corresponding time vector (in years)\n# '''\n\n# spot = os.getcwd()\n\n# FID_melt = os.path.join(spot, file)\n# data_melt = np.genfromtxt(FID_melt, delimiter=',')\n# input_year_melt = data_melt[0, :]\n# input_melt = data_melt[1, :]\n\n# return input_snowmelt, input_year_snowmelt\n\n# def read_snowmelt(file):\n# '''\n# Read in data for initial melt rates\n\n# :param file: name of the file which holds the accumulation rate data\n\n# :return input_bdot: accumulation rate vector from a specified csv file\n# :return input_year_bdot: corresponding time vector (in years)\n# '''\n\n# spot = os.getcwd()\n\n# FID_melt = os.path.join(spot, file)\n# data_melt = np.genfromtxt(FID_melt, delimiter=',')\n# input_year_melt = data_melt[0, :]\n# input_melt = data_melt[1, :]\n\n# return input_snowmelt, input_year_snowmelt\n"
] |
[
[
"numpy.minimum",
"numpy.shape",
"numpy.transpose",
"numpy.where",
"numpy.loadtxt"
]
] |
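`read_input` above orients a comma-separated forcing file by its shape (time in one row or column, the forcing variable in the other), and `read_init` pulls a named dataset out of an HDF5 results file. This sketch writes and reads tiny files in those layouts; the file names, years, and values are invented for illustration.

    import numpy as np
    import h5py

    # Write a two-row forcing file in the layout read_input() handles
    # (row 0 = decimal years, row 1 = the forcing variable), then read it back.
    years = np.arange(2000.0, 2010.0)
    temps = 250.0 + 2.0*np.random.rand(years.size)
    np.savetxt('example_forcing.csv', np.vstack([years, temps]), delimiter=',')

    data = np.loadtxt('example_forcing.csv', delimiter=',')
    input_year, input_data = data[0, :], data[1, :]
    print(input_year.shape, input_data.shape)

    # Store an initial profile the way read_init() expects to find it:
    # one named dataset per variable inside an HDF5 results file.
    with h5py.File('example_results.hdf5', 'w') as f5:
        f5.create_dataset('density', data=np.linspace(350.0, 900.0, 100))

    with h5py.File('example_results.hdf5', 'r') as f5:
        init_density = f5['density'][:]
    print(init_density[:3])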
crazywiden/Hangman_AI_solver
|
[
"b2b1028b6d26bc3454e59ecb3ee4ab27642b6317"
] |
[
"main.py"
] |
[
"\nimport argpase\nimport collections\nimport pandas as pd\nimport numpy as np\nfrom model import RNN_model\nimport torch\n\ndef arg_parser():\n parser = argparse.ArgumentParser(description=\"hangman game config\")\n parser.add_argument(\"--train_path\", type=str, default=\"words_250000_train.txt\",\n help=\"path of the train dictionary\")\n parser.add_argument(\"--lives\", type=int, default=6,\n help=\"upper limit of fail guesses\")\n args = parser.parse_args()\n return args\n\n\ndef load_model(model_path):\n model = RNN_model(target_dim=26, hidden_units=16)\n checkpoint = torch.load(model_path, map_location=lambda storage, loc: storage)\n model.load_state_dict(checkpoint['state_dict'])\n model.eval()\n return model\n\n\nclass HangmanGame(object):\n def __init__(self, train_set_path, model_path=\"model.pth\", n_gram=2):\n self.guessed_letters = []\n full_dictionary_location = train_set_path\n self.full_dictionary = self.build_dictionary(full_dictionary_location)\n self.full_dictionary_common_letter_sorted = collections.Counter(\"\".join(self.full_dictionary)).most_common()\n self.freq_by_length = self.init_df(self.full_dictionary)\n self.n_gram = self.init_n_gram(n_gram)\n self.current_dictionary = []\n self.history_condition = []\n self.model = load_model(model_path)\n\n def find_by_gram(self, all_gram, pre=None, suff=None):\n selected_gram = []\n for key, val in all_gram.items():\n if (pre is not None) and (key[0] == pre):\n selected_gram.append((key[1], val))\n if (suff is not None) and (key[1] == suff):\n selected_gram.append((key[0], val))\n\n res = {}\n for letter, freq in selected_gram:\n if letter not in res:\n res[letter] = freq\n else:\n res[letter] += freq\n final_res = [(key, val) for key, val in res.items()]\n return sorted(final_res, key=lambda x: x[1], reverse=True)\n\n def gen_n_gram(self, word, n):\n n_gram = []\n for i in range(n, len(word)+1):\n if word[i-n:i] not in n_gram:\n n_gram.append(word[i-n:i])\n return n_gram\n\n def init_n_gram(self, n):\n n_gram = {-1:[]}\n for word in self.full_dictionary:\n single_word_gram = self.gen_n_gram(word, n)\n if len(word) not in n_gram:\n n_gram[len(word)] = single_word_gram\n else:\n n_gram[len(word)].extend(single_word_gram)\n n_gram[-1].extend(single_word_gram)\n res = {}\n for key in n_gram.keys():\n res[key] = collections.Counter(n_gram[key])\n return res\n\n def freq_from_df(self, df):\n key, cnt = np.unique(df.values, return_counts=True)\n freq = [(k, val) for k, val in zip(key, cnt)]\n return sorted(freq, key=lambda x: x[1], reverse=True)\n\n def update_df(self, df, condition):\n \"\"\"\n :param df: dataframe\n each column is one location of a word\n each row is a word\n :param condition: dictionary\n key is letter\n value is which index does this letter appear\n means we only select the words which has letter <value> at index <key>\n note that we don't select words that has letter <value> at other index\n e.g. if condition = {1:'a'}, then \"app\" is selected while \"aha\" not\n :return:\n df: updated dataframe\n \"\"\"\n if len(condition) == 0:\n return df\n\n for letter, idx in condition.items():\n # find rows satisfy\n # 1. corresponding column == val\n # 2. 
all the other column != val\n query = \"\"\n for i in range(df.shape[1]):\n col = df.columns.values[i]\n if i in idx:\n query += \"{} == '{}' and \".format(col, letter)\n else:\n query += \"{} != '{}' and \".format(col, letter)\n query = query[:-5]\n new_df = df.query(query)\n df = new_df.copy()\n del new_df\n return df\n\n def init_df(self, dictionary):\n \"\"\"\n use words list to generate dictionary frequency\n each key is word length\n each value is a dataframe with column is location of each length\n \"\"\"\n group_by_length = collections.defaultdict(list)\n for word in dictionary:\n group_by_length[len(word)].append(word)\n\n res = {}\n for key in group_by_length.keys():\n word_list = group_by_length[key]\n tmp = pd.DataFrame([list(word) for word in word_list])\n tmp.columns = [chr(i + 97) for i in range(tmp.shape[1])]\n res[key] = tmp\n return res\n\n def gen_condition(self, word):\n tmp = {i: word[i] for i in range(len(word)) if word[i] != \"_\"}\n condition = {}\n for key, val in tmp.items():\n if val not in condition:\n condition[val] = [key]\n else:\n condition[val].append(key)\n return condition\n\n def encode_obscure_words(self, word):\n word_idx = [ord(i) - 97 if i != \"_\" else 26 for i in word]\n obscured_word = np.zeros((len(word), 27), dtype=np.float32)\n for i, j in enumerate(word_idx):\n obscured_word[i, j] = 1\n return obscured_word\n\n def guess(self, word): # word input example: \"_ p p _ e \"\n\n # divided word group by word length\n all_words = self.freq_by_length[len(word)]\n all_gram = self.n_gram[-1]\n # all_gram = self.n_gram[len(word)]\n\n # first guess by letter frequency in each word group\n new_condition = self.gen_condition(word)\n\n if len(self.history_condition) != 0 and new_condition != self.history_condition[-1]:\n self.history_condition.append(new_condition)\n\n all_words = self.update_df(all_words, new_condition)\n freq = self.freq_from_df(all_words)\n for i in range(len(freq)):\n if freq[i][0] not in self.guessed_letters:\n return freq[i][0]\n\n # if we run out of letters, use 2-gram to predict\n for i in range(len(word)):\n if word[i] == \"_\": # this is where we should apply 2-gram\n if (i == 0) or (word[i-1] == \"_\"):\n guess = self.find_by_gram(all_gram, pre=None, suff=word[i+1])\n elif (i == len(word) - 1) or (word[i+1] == \"_\"):\n guess = self.find_by_gram(all_gram, pre=word[i-1], suff=None)\n else:\n guess = self.find_by_gram(all_gram, pre=word[i-1], suff=word[i+1])\n break\n\n for i in range(len(guess)):\n if guess[i][0] not in self.guessed_letters:\n return guess[i][0]\n # if we run out of 2-gram, use LSTM model to predict\n # the benefit of LSTM model is to add more uncertainty to the prediction\n guessed_multi_hot = np.zeros(26, dtype=np.float32)\n for letter in self.guessed_letters:\n idx = ord(letter) - 97\n guessed_multi_hot[idx] = 1.0\n\n obscure_words = self.encode_obscure_words(word)\n obscure_words = np.asarray(obscure_words)\n guessed_multi_hot = np.asarray(guessed_multi_hot)\n obscure_words = torch.from_numpy(obscure_words)\n guessed_multi_hot = torch.from_numpy(guessed_multi_hot)\n out = self.model(obscure_words, guessed_multi_hot)\n guess = torch.argmax(out, dim=2).item()\n guess = chr(guess + 97)\n return guess\n\n def build_dictionary(self, dictionary_file_location):\n text_file = open(dictionary_file_location, \"r\")\n full_dictionary = text_file.read().splitlines()\n text_file.close()\n return full_dictionary\n\n def get_current_word(self):\n \"\"\"\n combine target word and guessed letters to generate obscured word\n 
\"\"\"\n word_seen = [letter if letter in self.guessed_letters else \"_\" for letter in self.target_word]\n return word_seen\n\n def start_game(self, num_lives=6, verbose=True):\n\n self.target_word = input(\"please enter a word for the computer to guess:\")\n # reset guessed letters to empty set and current plausible dictionary to the full dictionary\n self.guessed_letters = []\n self.current_dictionary = self.full_dictionary\n tries_remains = num_lives\n\n word_seen = self.get_current_word()\n if verbose:\n print(\"Successfully start a new game! # of tries remaining: {0}. Word: {1}.\".format(tries_remains, word_seen))\n\n while tries_remains > 0:\n # get guessed letter from user code\n guess_letter = self.guess(word_seen)\n\n # append guessed letter to guessed letters field in hangman object\n self.guessed_letters.append(guess_letter)\n if verbose:\n print(\"Guessing letter: {0}\".format(guess_letter))\n\n word_seen = self.get_current_word()\n print(\"current word:{}\".format(word_seen))\n\n if \"_\" not in word_seen:\n print(\"Successfully finished game!! The word is:{}, {} tries left\".format(word_seen, tries_remains))\n return True\n\n if guess_letter not in self.target_word:\n tries_remains -= 1\n\n print(\"# of tries exceeded!\")\n return False\n\nif __name__ == \"__main__\":\n args = arg_parser()\n train_set = args.train_set\n game = HangmanGame(train_set)\n game.start_game(args.lives)"
] |
[
[
"numpy.unique",
"numpy.asarray",
"torch.load",
"torch.from_numpy",
"numpy.zeros",
"torch.argmax"
]
] |
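Two small inconsistencies in the `main.py` listing above: it imports `argpase` (a misspelling of `argparse`) and later reads `args.train_set`, although the parser only defines `--train_path`. A corrected minimal argument-parsing sketch, matching the options the listing declares:

    import argparse

    def arg_parser():
        parser = argparse.ArgumentParser(description="hangman game config")
        parser.add_argument("--train_path", type=str, default="words_250000_train.txt",
                            help="path of the train dictionary")
        parser.add_argument("--lives", type=int, default=6,
                            help="upper limit of fail guesses")
        return parser.parse_args()

    if __name__ == "__main__":
        args = arg_parser()
        # main.py reads args.train_set, but the parser defines --train_path;
        # the attribute argparse creates is args.train_path.
        print(args.train_path, args.lives)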
JoePowers/multifil
|
[
"f2a60b6265f7278cbfe618de6cadc29642270bb6"
] |
[
"multifil/af.py"
] |
[
"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\naf.py - An actin filament\n\nCreate and maintain a thin filament and the subgroups that comprise it.\n\nCreated by Dave Williams on 2010-01-04.\n\"\"\"\n\nimport numpy as np\n\n\nclass BindingSite(object):\n \"\"\"A singular globular actin site\"\"\"\n def __init__(self, parent_thin_fil, index, orientation):\n \"\"\"Create a binding site on the thin filament\n \n Parameters:\n parent_thin_fil: the calling thin filament instance\n index: the axial index on the parent thin filament\n address: largest to most local, indices for finding this\n orientation: select between six orientations (0-5)\n \"\"\"\n # Remember passed attributes\n self.parent_thin = parent_thin_fil\n self.index = index\n self.address = ('bs', self.parent_thin.index, self.index)\n # Use the passed orientation index to choose the correct \n # orientation vector according to schema in ThinFilament docstring\n orientation_vectors = ((0.866, -0.5), (0, -1), (-0.866, -0.5), \n (-0.866, 0.5), (0, 1), (0.866, 0.5))\n self.orientation = orientation_vectors[orientation]\n # Start off in an activated state, fully open to binding\n self.permissiveness = 1.0\n # Create attributes to store things not yet present \n self.bound_to = None # None if unbound, Crossbridge object otherwise\n \n def __str__(self):\n \"\"\"Return the current situation of the binding site\"\"\"\n ident = ['Binding Site #' + str(self.index) + ' Info']\n ident.append(14 * '=')\n ident.append('State: ' + str(self.get_state()))\n if self.get_state() != 0:\n ident.append('Forces: ' + str(self.axialforce()) \n + '/' + str(self.radialforce()))\n return '\\n'.join(ident)\n \n def to_dict(self):\n \"\"\"Create a JSON compatible representation of the binding site\n \n Usage example:json.dumps(bs.to_dict(), indent=1) \n \n Current output includes:\n address: largest to most local, indices for finding this\n bound_to: T/F if the binding site is bound\n orientation: the y/z orientation of the binding site relative to \n the center of the thin filament\n permissiveness: the 0-1 level of binding permissiveness \n \"\"\"\n bsd = self.__dict__.copy()\n bsd.pop('index')\n bsd.pop('parent_thin')\n if bsd['bound_to'] is not None:\n bsd['bound_to'] = bsd['bound_to'].address\n return bsd\n \n def from_dict(self, bsd):\n \"\"\" Load values from a binding site dict. 
Values read in correspond to \n the current output documented in to_dict.\n \"\"\"\n # Check for index mismatch\n read, current = tuple(bsd['address']), self.address\n assert read==current, \"index mismatch at %s/%s\"%(read, current)\n # Local keys\n self.orientation = bsd['orientation']\n self.permissiveness = bsd['permissiveness']\n if bsd['bound_to'] is not None:\n self.bound_to = self.parent_thin.parent_lattice.\\\n resolve_address(bsd['bound_to'])\n else:\n self.bound_to = bsd['bound_to']\n \n def axialforce(self, axial_location=None):\n \"\"\"Return the axial force of the bound cross-bridge, if any\n \n Parameters:\n axial_location: location of the current node (optional)\n Returns: \n f_x: the axial force generated by the cross-bridge\n \"\"\"\n if self.bound_to is None:\n return 0.0 \n # Axial force on actin is equal but opposite \n return -self.bound_to.axialforce(tip_axial_loc = axial_location) \n \n def radialforce(self):\n \"\"\"Radial force vector of the bound cross-bridge, if any\n \n Returns:\n (f_y, f_z): the radial force vector of this binding site\n \"\"\"\n if self.bound_to is None:\n return 0.0\n force_mag = -self.bound_to.radialforce() # Equal but opposite\n return np.multiply(force_mag, self.orientation)\n \"\"\"Create link to the relevant thick filament face when known\"\"\"\n self.thick_face = thick_face\n \n def bind_to(self, crossbridge):\n \"\"\"Link this binding site to a cross-bridge object\"\"\"\n self.bound_to = crossbridge\n \n def unbind(self):\n \"\"\"Kill off any link to a crossbridge\"\"\"\n assert(self.bound_to is not None) # Else why try to unbind?\n self.bound_to = None\n \n def get_state(self):\n \"\"\"Return the current numerical state, 0/unbound or 1/bound\"\"\"\n return self.bound_to is not None\n \n def get_lattice_spacing(self):\n \"\"\"Get lattice spacing from the parent filament\"\"\"\n return self.parent_thin.get_lattice_spacing()\n \n def get_axial_location(self):\n \"\"\"Return the current axial location of the binding site\"\"\"\n return self.parent_thin.axial[self.index]\n\n\nclass ThinFace(object):\n \"\"\"Represent one face of an actin filament\n Deals with orientation in the typical fashion for thin filaments\n ================\n || m4 || ^\n || m3 m5 || | ^ \n || af || Z / \n || m2 m0 || X \n || m1 || Y-->\n ================\n \"\"\"\n def __init__(self, parent_thin_fil, orientation, index, binding_sites):\n \"\"\"Create the thin filament face\n \n Parameters:\n parent_thin_fil: the thin filament on which this face sits\n orientation: which myosin face is opposite this face (0-5)\n index: location on the thin filament this face occupies (0-2)\n address: largest to most local, indices for finding this\n binding_sites: links to the actin binding sites on this face\n \"\"\"\n self.parent_thin = parent_thin_fil \n self.index = index\n self.address = ('thin_face', self.parent_thin.index, self.index)\n self.orientation = orientation \n self.binding_sites = binding_sites \n self.thick_face = None # ThickFace instance this face interacts with\n \n def to_dict(self):\n \"\"\"Create a JSON compatible representation of the thin face\n \n Usage example: json.dumps(thin_face.to_dict(), indent=1)\n \n Current output includes:\n address: largest to most local, indices for finding this\n orientation: out of 0-5 directions, which this projects in\n binding_sites: address information for each binding site\n \"\"\"\n tfd = self.__dict__.copy()\n tfd.pop('index')\n tfd.pop('parent_thin')\n tfd['thick_face'] = tfd['thick_face'].address\n 
tfd['binding_sites'] = [bs.address for bs in tfd['binding_sites']]\n return tfd\n\n def from_dict(self, tfd):\n \"\"\" Load values from a thin face dict. Values read in correspond to \n the current output documented in to_dict.\n \"\"\"\n # Check for index mismatch\n read, current = tuple(tfd['address']), self.address\n assert read==current, \"index mismatch at %s/%s\"%(read, current)\n # Local keys\n self.orientation = tfd['orientation']\n self.thick_face = self.parent_thin.parent_lattice.resolve_address(\n tfd['thick_face'])\n # Sub-structure keys\n self.binding_sites = [self.parent_thin.resolve_address(bsa) \\\n for bsa in tfd['binding_sites']]\n \n def nearest(self, axial_location):\n \"\"\"Where is the nearest binding site?\n \n There a fair number of cases that must be dealt with here. When \n the system becomes too short (and some nearest queries are being\n directed to a thin face that doesn't really have anything near \n that location) the face will just return the nearest location and \n let the kinetics deal with the fact that binding is about as likely\n as stepping into the same river twice. \n \n Parameters:\n axial_location: the axial coordinates to seek a match for\n Return:\n binding_site: the nearest binding site on this face\n \"\"\"\n # Next three lines of code enforce a jittery hiding, sometimes the \n # binding site just beyond the hiding line can be accessed\n hiding_line = self.parent_thin.hiding_line\n axial_location = max(hiding_line, axial_location)\n face_locs = [site.get_axial_location() for site in self.binding_sites]\n close_index = np.searchsorted(face_locs, axial_location)\n # If not using a very short SL, where the end face loc is closest \n if close_index != len(face_locs):\n dists = np.abs((face_locs[close_index] - axial_location, \n face_locs[close_index-1] - axial_location))\n else:\n return self.binding_sites[close_index-1] # If so, return end\n if dists[0] < dists[1] or len(self.binding_sites) >= close_index + 1:\n return self.binding_sites[close_index]\n else:\n return self.binding_sites[close_index + 1]\n \n def radialforce(self):\n \"\"\"What is the radial force this face experiences?\n \n A side note: This was where the attempt to write the model out in \n a functional manner broke down. I got this far with nothing ever \n asking another instance for any information and everything being \n passed by method parameters. This was a really nice idea and \n worked well until this point where I had to start performing \n overly complex mental calisthenics to understand how things were \n going to be passed around. This lead to the current system where \n each instance has an internal state that it is responsible for \n keeping. This might make debugging harder in the long run, but it \n made the model writable in the meanwhile. Some teeth gnashing is\n included below for reference.\n \n Teeth gnashing:\n The source of conflict here seems to be a competition between \n the desire to write this in a functional manner and have all\n information passed down to the function as is needed and the \n desire to be able to call any function of any module at any \n time and have it return something sensible. This makes \n testing some bits easier but means that it can become harder \n to track what is going on with the states of the various \n functions. I am unsure as to how this should be resolved at \n this time. I want the final design to be as uncluttered and \n easy to troubleshoot as is possible. 
Perhaps something where \n the storage of information is kept separate from the ways that \n the modules are acting upon it? The advantage of this is that\n passing information around becomes infinitely easier, the \n drawback is that I am not sure that this isn't just a step \n removed from declaring every variable to be global and making \n the whole thing a fair bit more brittle.\n \n Returns: \n radial_force: the radial force myosin heads on this face exert\n \"\"\"\n # First, a sanity check\n if self.thick_face is None: \n raise AttributeError(\"Thick filament not assigned yet.\")\n # Now find the forces on each cross-bridge\n radial_forces = [site.radialforce() for site in self.binding_sites]\n return np.sum(radial_forces, 1)\n \n def set_thick_face(self, myosin_face):\n \"\"\"Link to the relevant myosin filament.\"\"\"\n assert(self.orientation == myosin_face.index) \n self.thick_face = myosin_face\n return\n \n def axial_location(self, binding_site_id):\n \"\"\"Get the axial location of the selected binding site\"\"\"\n return self.binding_sites[binding_site_id].get_axial_location()\n \n def get_lattice_spacing(self):\n \"\"\"Return lattice spacing to the face's opposite number\"\"\"\n return self.parent_thin.get_lattice_spacing() \n\n\nclass ThinFilament(object):\n \"\"\"Each thin filament is made up of two actin strands. The overall \n filament length at rest is 1119 nm [(1)][Tanner2007]. Each strand \n hosts 45 actin binding sites (or nodes) giving the whole filament \n 90 actin nodes, plus one at the Z-line for accounting.\n \n These nodes are located every 24.8 nm on each actin strand and are\n rotated by 120 degrees relative to the prior node [(1)][Tanner2007]. \n This organization does not specify the relative offsets of the two \n filament's nodes.\n \n ## Naive repeating geometry of the thin filament\n The binding nodes of the two actin filaments must be offset by a \n multiple of the angle (120 degrees)x(distance offset/(24.8 nm)), but \n not by 360 degrees, or one of the actin filaments would have no binding \n sites facing a neighboring thick filament. We assume that the actin \n nodes on the two strands are offset by half of the distance between \n adjacent nodes (12.4 nm) and 180 degrees. This means that if one actin\n filament has a binding site facing one myosin filament, the second actin\n filament will have a binding site facing a second myosin filament \n 12.4 nm down the thin filament. The second myosin filament will be \n 240 degrees clockwise of the first myosin filament around the \n thin filament.\n \n ## Binding site numbering\n As in the thick filament, the nodes/binding sites on the thin filament\n are numbered from low at the left to high on the right. 
Thus the 90th \n node is adjacent to the Z-line.\n \"\"\"\n def __init__(self, parent_lattice, index, face_orientations, start=0):\n \"\"\"Initialize the thin filament\n \n Parameters:\n parent_lattice: the calling half-sarcomere instance\n index: which thin filament this is (0-7)\n face_orientations: list of faces' numerical orientation (0-5)\n z_line: the location of the end of the thin filament (1250 nm)\n start: which of the 26 actin monomers in an actin\n repeating unit this filament begins with (defaults\n to the first)\n Returns:\n None\n ## Thin face arrangement\n The thin filaments faces correspond to the following diagram:\n ================\n || m4 || ^\n || m3 m5 || | ^ \n || af || Z / \n || m2 m0 || X \n || m1 || Y-->\n ================\n These orientations correspond to the orientations of the facing \n thick filaments. Each thin filament will link to either faces \n 0, 2, and 4 or faces 1, 3, and 5. \n This will result in a set of unit vectors pointing from the\n thin filament to the thick faces that are either \n ((0, 1), (0.866, -0.5), (-0.866, -0.5))\n for the case on the left or, for the case on the right,\n ((-0.886, 0.5), (0.866, 0.5), (0, -1)) \n The vectors govern both what radial force linking cross-bridges\n generate and which actin monomers are considered to be facing \n the adjacent filaments.\n \"\"\"\n # Remember who created you\n self.parent_lattice = parent_lattice\n # Remember who you are\n self.index = index\n self.address = ('thin_fil', self.index)\n # TODO The creation of the monomer positions and angles should be refactored into a static function of similar.\n # Figure out axial positions\n mono_per_poly = 26 # actin monomers in an actin polymer unit\n poly_per_fil = 15 # actin polymers in a thin filament\n polymer_base_length = 72.0 # nm per polymer unit length\n polymer_base_turns = 12.0 # revolutions per polymer\n rev = 2*np.pi # one revolution\n pitch = polymer_base_turns * rev / mono_per_poly\n rise = polymer_base_length / mono_per_poly\n # Monomer positions start near the m-line\n monomer_positions = [(\n self.z_line - mono_per_poly*poly_per_fil*rise) + m*rise\n for m in range(mono_per_poly*poly_per_fil)] \n monomer_angles = [(((m+start+1) % mono_per_poly) * pitch) % rev \n for m in range(mono_per_poly * poly_per_fil)]\n # Convert face orientations to angles, then to angles from 0 to 2pi\n orientation_vectors = ((0.866, -0.5), (0, -1.0), (-0.866, -0.5),\n (-0.866, 0.5), (0, 1.0), (0.866, 0.5))\n face_vectors = [orientation_vectors[o] for o in face_orientations]\n face_angles = [np.arctan2(v[1], v[0]) for v in face_vectors]\n face_angles = [v + rev if (v < 0) else v for v in face_angles] \n # Find which monomers are opposite each face\n wiggle = rev/24 # count faces within 15 degrees of opposite\n mono_in_each_face = [np.nonzero(np.abs(np.subtract(monomer_angles, \n face_angles[i]))<wiggle)[0] for i in range(len(face_angles))]\n # This is [(index_to_face_1, ...), (index_to_face_2, ...), ...] 
\n # Translate monomer position to binding site position\n axial_by_face = [[monomer_positions[mono_ind] for mono_ind in face] \n for face in mono_in_each_face]\n axial_flat = np.sort(np.hstack(axial_by_face))\n # Tie the nodes on each face into the flat axial locations \n node_index_by_face = np.array([[np.nonzero(axial_flat == l)[0][0] \n for l in f] for f in axial_by_face])\n face_index_by_node = np.tile(None, len(axial_flat))\n for face_ind in range(len(node_index_by_face)):\n for node_ind in node_index_by_face[face_ind]:\n face_index_by_node[node_ind] = face_ind\n # Create binding sites and thin faces\n self.binding_sites = []\n for index in range(len(axial_flat)):\n orientation = face_orientations[face_index_by_node[index]]\n self.binding_sites.append(BindingSite(self, index, orientation))\n self.thin_faces = []\n for face_index in range(len(node_index_by_face)):\n face_binding_sites = ([self.binding_sites[i] for i in\n node_index_by_face[face_index]])\n orientation = face_orientations[face_index]\n self.thin_faces.append(\n ThinFace(self, orientation, face_index, face_binding_sites))\n del(orientation, face_binding_sites)\n # Remember the axial locations, both current and rest\n self.axial = axial_flat\n self.rests = np.diff(np.hstack([self.axial, self.z_line]))\n # Other thin filament properties to remember\n self.number_of_nodes = len(self.binding_sites)\n self.thick_faces = None # Set after creation of thick filaments\n self.k = 1743 \n \n def to_dict(self):\n \"\"\"Create a JSON compatible representation of the thin filament\n \n Example usage: json.dumps(thin.to_dict(), indent=1)\n \n Current output includes:\n address: largest to most local, indices for finding this\n axial: axial locations of binding sites\n rests: rest spacings between axial locations\n thin_faces: each of the thin faces\n binding_sites: each of the binding sites\n k: stiffness of the thin filament\n number_of_nodes: number of binding sites\n \"\"\"\n thind = self.__dict__.copy()\n thind.pop('index')\n thind.pop('parent_lattice') # TODO: Spend a P on an id for the lattice\n thind['thick_faces'] = [tf.address for tf in thind['thick_faces']]\n thind['thin_faces'] = [tf.to_dict() for tf in thind['thin_faces']]\n thind['axial'] = list(thind['axial'])\n thind['rests'] = list(thind['rests'])\n thind['binding_sites'] = [bs.to_dict() for bs in \\\n thind['binding_sites']]\n return thind\n \n def from_dict(self, td):\n \"\"\" Load values from a thin filament dict. 
Values read in correspond \n to the current output documented in to_dict.\n \"\"\"\n # Check for index mismatch\n read, current = tuple(td['address']), self.address\n assert read==current, \"index mismatch at %s/%s\"%(read, current)\n # Local keys\n self.axial = np.array(td['axial'])\n self.rests = np.array(td['rests'])\n self.k = td['k']\n self.number_of_nodes = td['number_of_nodes']\n # Sub-structure and remote keys\n self.thick_faces = tuple([self.parent_lattice.resolve_address(tfa) \n for tfa in td['thick_faces']])\n for data, bs in zip(td['binding_sites'], self.binding_sites):\n bs.from_dict(data)\n for data, face in zip(td['thin_faces'], self.thin_faces):\n face.from_dict(data)\n \n def resolve_address(self, address):\n \"\"\"Give back a link to the object specified in the address\n We should only see addresses starting with 'thin_face' or 'bs'\n \"\"\"\n if address[0] == 'thin_face':\n return self.thin_faces[address[2]]\n elif address[0] == 'bs':\n return self.binding_sites[address[2]]\n import warnings\n warnings.warn(\"Unresolvable address: %s\"%address)\n \n def set_thick_faces(self, thick_faces):\n \"\"\"Set the adjacent thick faces and associated values\n \n Parameters:\n thick_faces: links to three surrounding thick faces, in the\n order (0, 2, 4) or (1, 3, 5)\n \n ## Myosin filament arrangement\n ================================== ^\n || m4 || m3 m5 || | ^ \n || or af || Z / \n || af || || X \n || m2 m0 || m1 || Y-->\n ================================== \n \"\"\"\n self.thick_faces = thick_faces\n for a_face, m_face in zip(self.thin_faces, self.thick_faces):\n a_face.set_thick_face(m_face)\n \n def effective_axial_force(self):\n \"\"\"The axial force experienced at the Z-line from the thin filament\n \n This only accounts for the force at the Z-line due to the actin \n node adjacent to it, i.e. 
this is the force that the Z-line\n experiences, not the tension existing elsewhere along the thin \n filament.\n Return:\n force: the axial force at the Z-line\n \"\"\"\n return (self.rests[-1] - (self.z_line - self.axial[-1])) * self.k\n \n def axial_force_of_each_node(self, axial_locations=None):\n \"\"\"Return a list of the thin filament axial force at each node\n \n Parameters:\n axial_locations: location of each node (optional)\n Returns:\n axial_forces: a list of the axial force at each node\n \"\"\"\n if axial_locations == None:\n axial_forces = [site.axialforce() for site in self.binding_sites]\n else:\n axial_forces = [site.axialforce(loc) for \n site,loc in zip(self.binding_sites, axial_locations)]\n return axial_forces\n \n def axialforce(self, axial_locations=None):\n \"\"\"Return a list of axial forces at each binding site node location\n \n This returns the force at each node location (including the z-disk \n connection point), this is the sum of the force that results from\n displacement of the nodes from their rest separation and the axial\n force created by any bound cross-bridges\n \n Parameters:\n axial_locations: location of each node (optional)\n Return:\n force: sum of force from the cross-bridges and node displacement\n \"\"\"\n # Calculate the force exerted by the thin filament's backbone\n thin = self._axial_thin_filament_forces(axial_locations)\n # Calculate the force exerted by any existing cross-bridges\n binding_sites = self.axial_force_of_each_node(axial_locations)\n # Return the combination of the two\n return np.add(thin, binding_sites)\n \n def settle(self):\n \"\"\"Reduce the total axial force on the system by moving the crowns\"\"\"\n # Total axial force on each point\n forces = self.axialforce()\n # Individual displacements needed to balance force\n isolated = 0.95*forces/self.k \n isolated[0] *= 2 # First node has spring on only one side\n # Cumulative displacements, working back from z-disk\n cumulative = np.flipud(np.cumsum(np.flipud(isolated)))\n # New axial locations\n self.axial += cumulative\n return forces\n \n def radial_force_of_each_node(self):\n \"\"\"The radial force produced at each binding site node\n \n Parameters: \n None\n Returns\n radial_forces: a list of (f_y, f_z) force vectors\n \"\"\"\n radial_forces = [nd.radialforce() for nd in self.binding_sites]\n return radial_forces\n \n def radial_force_of_filament(self):\n \"\"\"The sum of the radial force experienced by this filament \n \n Parameters:\n None\n Returns:\n radial_force: a single (f_y, f_z) vector\n \"\"\"\n radial_force_list = self.radial_force_of_each_node()\n radial_force = np.sum(radial_force_list, 0)\n return radial_force\n \n def displacement_per_node(self):\n \"\"\"Displacement from rest lengths of segments between nodes\"\"\"\n dists = np.diff(np.hstack([self.axial, self.z_line]))\n return dists - self.rests\n\n def displacement(self):\n \"\"\"A metric of how much the thin filament locations are offset\"\"\"\n return np.sum(np.abs(self.displacement_per_node()))\n \n def _axial_thin_filament_forces(self, axial_locations=None):\n \"\"\"The force of the filament binding sites, sans cross-bridges\n \n Parameters:\n axial_locations: location of each node (optional)\n Returns:\n net_force_on_each_binding_site: per-site force\n \"\"\"\n # Use the thin filament's stored axial locations if none are passed\n if axial_locations == None:\n axial_locations = np.hstack([self.axial, self.z_line]) \n else:\n axial_locations = np.hstack([axial_locations, self.z_line])\n # Find the 
distance from binding site to binding site\n dists = np.diff(axial_locations)\n # Find the compressive or expansive force on each spring\n spring_force = (dists - self.rests) * self.k\n # The first node's not connected, so that side has no force...\n spring_force = np.hstack([0, spring_force])\n # Convert this to the force on each node\n net_force_on_each_binding_site = np.diff(spring_force)\n return net_force_on_each_binding_site\n \n def update_axial_locations(self, flat_axial_locs):\n \"\"\"Update the axial locations to the passed ones\n \n Parameters:\n flat_axial_locs: the new locations for all axial nodes\n Returns:\n None\n \"\"\"\n # You aren't allowed to change the number of nodes\n assert(len(flat_axial_locs) == len(self.axial))\n self.axial = flat_axial_locs\n \n @property\n def z_line(self):\n return self.parent_lattice.z_line\n \n @property\n def hiding_line(self):\n \"\"\"Return the distance below which actin binding sites are hidden\"\"\"\n return self.parent_lattice.hiding_line\n \n @property\n def permissiveness(self):\n \"\"\"Return the permissiveness of each binding site\"\"\"\n return [site.permissiveness for site in self.binding_sites]\n \n @permissiveness.setter\n def permissiveness(self, new_permissiveness):\n \"\"\"Assign all binding sites the new permissiveness\"\"\"\n for site in self.binding_sites:\n site.permissiveness = new_permissiveness \n \n def get_binding_site(self, index):\n \"\"\"Return a link to the binding site site at index\"\"\"\n return self.binding_sites[index]\n \n @property\n def bound_sites(self):\n \"\"\"Give a list of binding sites that are bound to an XB\"\"\"\n return filter(lambda bs: bs.bound_to is not None, self.binding_sites)\n \n def get_axial_location(self, index):\n \"\"\"Return the axial location of the node at index\"\"\"\n return self.axial[index]\n \n def get_lattice_spacing(self):\n \"\"\"Return the lattice spacing of the half-sarcomere\"\"\"\n return self.parent_lattice.get_lattice_spacing()\n \n\nif __name__ == '__main__':\n print(\"af.py is really meant to be called as a supporting module\")\n"
] |
[
[
"numpy.hstack",
"numpy.abs",
"numpy.multiply",
"numpy.nonzero",
"numpy.flipud",
"numpy.subtract",
"numpy.arctan2",
"numpy.diff",
"numpy.searchsorted",
"numpy.add",
"numpy.array",
"numpy.sum"
]
] |
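The ThinFilament constructor in the af.py entry above derives monomer positions and angles from the constants quoted in its own docstrings (26 monomers per 72 nm polymer unit, 12 turns per unit, z-line at 1250 nm, faces counted within 15 degrees). The standalone sketch below reproduces just that arithmetic outside the class; the helper names and the +Y face angle are illustrative and not part of the repo.

# Standalone sketch (not the repo's API): the monomer placement arithmetic
# described in ThinFilament.__init__, using the constants from its docstring.
import numpy as np

def monomer_geometry(z_line=1250.0, start=0,
                     mono_per_poly=26, poly_per_fil=15,
                     polymer_base_length=72.0, polymer_base_turns=12.0):
    """Axial positions (nm) and azimuthal angles (rad) of the actin monomers."""
    rev = 2 * np.pi
    pitch = polymer_base_turns * rev / mono_per_poly   # angular step per monomer
    rise = polymer_base_length / mono_per_poly         # axial step per monomer
    n = mono_per_poly * poly_per_fil
    positions = (z_line - n * rise) + np.arange(n) * rise
    angles = (((np.arange(n) + start + 1) % mono_per_poly) * pitch) % rev
    return positions, angles

def monomers_facing(angles, face_angle, wiggle=2 * np.pi / 24):
    """Indices of monomers within +/- 15 degrees of a thick-filament face."""
    return np.nonzero(np.abs(angles - face_angle) < wiggle)[0]

positions, angles = monomer_geometry()
facing = monomers_facing(angles, np.arctan2(1.0, 0.0))   # face pointing along +Y
print(len(facing), positions[facing][:3])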
blevine37/blarf
|
[
"8f1d1abb05d72069323d7911f2afb38ad3d9146b"
] |
[
"blarf/rbfn_center.py"
] |
[
"import numpy as np\nimport math\nfrom blarf.dataset import dataset\nfrom blarf.cluster import cluster\n\nclass rbfn_center():\n def __init__(self,nd):\n self.numdims = nd\n self.positions = np.zeros(nd)\n self.numbf = 0\n self.bf_icoords = np.zeros((0,nd),dtype=np.intc)\n self.bf_widths = np.zeros((0,nd))\n\n def get_numdims(self):\n return self.numdims\n\n def set_positions(self,pos):\n self.positions = pos.copy()\n\n def get_positions(self):\n return self.positions.copy()\n\n def set_numbf(self,nbf):\n self.numbf = nbf\n nd = self.get_numdims()\n self.bf_icoords = np.resize(self.bf_icoords,(nbf,nd))\n self.bf_widths = np.resize(self.bf_widths,(nbf,nd))\n\n def get_numbf(self):\n return self.numbf\n\n def set_bf_icoords(self,ic):\n self.bf_icoords = ic.copy()\n\n def get_bf_icoords(self):\n return self.bf_icoords.copy()\n\n def add_bf(self,ic,w):\n nbf = self.get_numbf() + 1\n self.set_numbf(nbf)\n self.bf_icoords[nbf-1,:] = ic\n self.bf_widths[nbf-1,:] = w\n \n def set_bf_widths(self,w):\n self.bf_widths = w.copy()\n\n def get_bf_widths(self):\n return self.bf_widths.copy()\n\n def h5_output(self,centgrp):\n members = [attr for attr in dir(self) if not callable(getattr(self, attr)) and not attr.startswith(\"__\")]\n for key in members:\n dset = centgrp.create_dataset(key, data=eval(\"self.\" + key))\n\n#################################\n# code for internal types\n#################################\n\n# reciprical_bonds\n\n def init_rbfn_center_reciprical_bonds_traditionalrbf(self,width):\n ic = np.ones(self.get_numdims(),dtype=np.intc)\n w = width*np.ones(self.get_numdims())\n self.add_bf(ic,w)\n \n def init_rbfn_center_reciprical_bonds_onedimensional(self,width):\n for idim in range(self.get_numdims()):\n ic = np.zeros(self.get_numdims(),dtype=np.intc)\n ic[idim] = 1\n w = np.zeros(self.get_numdims())\n w[idim] = width\n self.add_bf(ic,w)\n \n"
] |
[
[
"numpy.resize",
"numpy.zeros"
]
] |
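A short usage sketch for the rbfn_center class in the entry above. The import path is inferred from the row's file_path (blarf/rbfn_center.py) and would need the repo on the Python path; the printed shapes follow from add_bf resizing bf_icoords and bf_widths to (numbf, numdims).

# Usage sketch (import path inferred from the row, run inside the blarf repo).
import numpy as np
from blarf.rbfn_center import rbfn_center

nd = 3                                   # three internal coordinates
center = rbfn_center(nd)
center.set_positions(np.array([1.0, 2.0, 3.0]))

# One Gaussian over all coordinates...
center.init_rbfn_center_reciprical_bonds_traditionalrbf(0.5)
# ...plus one one-dimensional basis function per coordinate.
center.init_rbfn_center_reciprical_bonds_onedimensional(0.25)

print(center.get_numbf())                # 1 + nd = 4
print(center.get_bf_icoords())           # rows of 0/1 flags per dimension
print(center.get_bf_widths().shape)      # (4, 3)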
Wang518hongyu/PyGEMwangtest
|
[
"f6ff507681b45599d0ecce5be2e5292e94fd09f7"
] |
[
"pygem.pygem_input.py"
] |
[
"\"\"\"Model inputs to run PyGEM\"\"\"\r\n\r\n# Built-in libraries\r\nimport os\r\n# External libraries\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\n\r\n# %% Functions to select specific glacier numbers\r\ndef get_same_glaciers(glac_fp):\r\n \"\"\"\r\n Get same 1000 glaciers for testing of priors\r\n\r\n Parameters\r\n ----------\r\n glac_fp : str\r\n filepath to where netcdf files of individual glaciers are held\r\n\r\n Returns\r\n -------\r\n glac_list : list\r\n list of rgi glacier numbers\r\n \"\"\"\r\n glac_list = []\r\n for i in os.listdir(glac_fp):\r\n if i.endswith('.nc'):\r\n glac_list.append(i.split('.')[1])\r\n glac_list = sorted(glac_list)\r\n return glac_list\r\n\r\n\r\ndef get_shean_glacier_nos(region_no, number_glaciers=0, option_random=0):\r\n \"\"\"\r\n Generate list of glaciers that have calibration data and select number of glaciers to include.\r\n\r\n The list is currently sorted in terms of area such that the largest glaciers are modeled first.\r\n\r\n Parameters\r\n ----------\r\n region_no : int\r\n region number (Shean data available for regions 13, 14, and 15)\r\n number_glaciers : int\r\n number of glaciers to include in model run (default = 0)\r\n option_random : int\r\n option to select glaciers randomly for model run (default = 0, not random)\r\n\r\n Returns\r\n -------\r\n num : list of strings\r\n list of rgi glacier numbers\r\n \"\"\"\r\n # safety, convert input to int\r\n region_no = int(region_no)\r\n # get shean's data, convert to dataframe, get\r\n # glacier numbers\r\n current_directory = os.getcwd()\r\n csv_path = current_directory + '/../DEMs/Shean_2019_0213/hma_mb_20190215_0815_std+mean_all_filled_bolch.csv'\r\n ds_all = pd.read_csv(csv_path)\r\n ds_reg = ds_all[(ds_all['RGIId'] > region_no) & (ds_all['RGIId'] < region_no + 1)].copy()\r\n if option_random == 1:\r\n ds_reg = ds_reg.sample(n=number_glaciers)\r\n else:\r\n ds_reg = ds_reg.sort_values('area_m2', ascending=False)\r\n ds_reg.reset_index(drop=True, inplace=True)\r\n\r\n # Glacier number and index for comparison\r\n ds_reg['glacno'] = ((ds_reg['RGIId'] % 1) * 10 ** 5).round(0).astype(int)\r\n ds_reg['glacno_str'] = (ds_reg['glacno'] / 10 ** 5).apply(lambda x: '%.5f' % x).astype(str).str.split('.').str[1]\r\n num = list(ds_reg['glacno_str'].values)\r\n num = sorted(num)\r\n return num\r\n\r\n\r\ndef glac_num_fromrange(int_low, int_high):\r\n \"\"\"\r\n Generate list of glaciers for all numbers between two integers.\r\n\r\n Parameters\r\n ----------\r\n int_low : int64\r\n low value of range\r\n int_high : int64\r\n high value of range\r\n\r\n Returns\r\n -------\r\n y : list\r\n list of rgi glacier numbers\r\n \"\"\"\r\n x = (np.arange(int_low, int_high + 1)).tolist()\r\n y = [str(i).zfill(5) for i in x]\r\n return y\r\n\r\n\r\ndef glac_fromcsv(csv_fullfn, cn='RGIId'):\r\n \"\"\"\r\n Generate list of glaciers from csv file\r\n\r\n Parameters\r\n ----------\r\n csv_fp, csv_fn : str\r\n csv filepath and filename\r\n\r\n Returns\r\n -------\r\n y : list\r\n list of glacier numbers, e.g., ['14.00001', 15.00001']\r\n \"\"\"\r\n df = pd.read_csv(csv_fullfn)\r\n return [x.split('-')[1] for x in df['RGIId'].values]\r\n\r\n\r\n# %%\r\n# Model setup directory\r\nmain_directory = os.getcwd()\r\n# Output directory\r\noutput_filepath = main_directory + '/../Output/'\r\nmodel_run_date = 'October 23, 2020'\r\n\r\n# ===== GLACIER SELECTION =====\r\n# rgi_regionsO1 = [13, 14, 15] # 1st order region number (RGI V6.0)\r\nrgi_regionsO1 = [1] # 1st order region number (RGI V6.0)\r\nrgi_regionsO2 = 'all' # 
2nd order region number (RGI V6.0)\r\n# RGI glacier number (RGI V6.0)\r\n# Two options: (1) use glacier numbers for a given region (or 'all'), must have glac_no set to None\r\n# (2) glac_no is not None, e.g., ['1.00001', 13.0001'], overrides rgi_glac_number\r\nrgi_glac_number = 'all'\r\n# rgi_glac_number = ['00013']\r\n# rgi_glac_number = glac_num_fromrange(1,5)\r\n# rgi_glac_number = get_same_glaciers(output_filepath + 'cal_opt1/reg1/')\r\n# rgi_glac_number = get_shean_glacier_nos(rgi_regionsO1[0], 1, option_random=1)\r\n# glac_no = ['15.03733']\r\nglac_no = ['15.03742']\r\n# glac_no = ['1.00570','1.15645','11.00897','14.06794','15.03733','18.02342']\r\n# glac_no = None\r\nif glac_no is not None:\r\n rgi_regionsO1 = sorted(list(set([int(x.split('.')[0]) for x in glac_no])))\r\n\r\n# ===== CLIMATE DATA =====\r\n# Reference period runs\r\n# ref_gcm_name = 'ERA-Interim' # reference climate dataset\r\nref_gcm_name = 'ERA5' # reference climate dataset\r\nref_startyear = 2000 # first year of model run (reference dataset)\r\nref_endyear = 2019 # last year of model run (reference dataset)\r\nref_wateryear = 'calendar' # options for years: 'calendar', 'hydro', 'custom'\r\nref_spinupyears = 0 # spin up years\r\nconstantarea_years = 0 # number of years to not let the area or volume change\r\nif ref_spinupyears > 0:\r\n assert 0 == 1, 'Code needs to be tested to enure spinup years are correctly accounted for in output files'\r\nif constantarea_years > 0:\r\n print('\\nConstant area years > 0\\n')\r\n\r\n# Simulation runs (separate so calibration and simulations can be run at same time; also needed for bias adjustments)\r\n# gcm_startyear = 2000 # first year of model run (simulation dataset)\r\n# gcm_endyear = 2019 # last year of model run (simulation dataset)\r\ngcm_startyear = 2000 # first year of model run (simulation dataset)\r\ngcm_endyear = 2100 # last year of model run (simulation dataset)\r\ngcm_spinupyears = 0 # spin up years for simulation (output not set up for spinup years at present)\r\nif gcm_spinupyears > 0:\r\n assert 0 == 1, 'Code needs to be tested to enure spinup years are correctly accounted for in output files'\r\ngcm_wateryear = 'calendar' # water year for simulation\r\n\r\n# Hindcast option (flips array so 1960-2000 would run 2000-1960 ensuring that glacier area at 2000 is correct)\r\nhindcast = 0 # 1: run hindcast simulation, 0: do not\r\nif hindcast == 1:\r\n constantarea_years = 0 # number of years to not let the area or volume change\r\n gcm_startyear = 1980 # first year of model run (simulation dataset)\r\n gcm_endyear = 2000 # last year of model run (simulation dataset)\r\n\r\n# Synthetic options (synthetic refers to created climate data, e.g., repeat 1995-2015 for the next 100 years)\r\noption_synthetic_sim = 0 # 1: run synthetic simulation, 0: do not\r\nif option_synthetic_sim == 1:\r\n synthetic_startyear = 1995 # synthetic start year\r\n synthetic_endyear = 2015 # synethetic end year\r\n synthetic_spinupyears = 0 # synthetic spinup years\r\n synthetic_temp_adjust = 3 # Temperature adjustment factor for synthetic runs\r\n synthetic_prec_factor = 1.12 # Precipitation adjustment factor for synthetic runs\r\n\r\n# %% SIMULATION OPTIONS\r\n# Glacier dynamics options ('OGGM', 'MassRedistributionCurves', None??)\r\noption_dynamics = 'OGGM'\r\n# option_dynamics = 'MassRedistributionCurves'\r\n\r\n# MCMC options\r\nsim_iters = 100 # number of simulations (needed for cal_opt 2)\r\n# sim_iters = 1 # number of simulations (needed for cal_opt 2)\r\n# print('\\n\\nDELETE ME! 
- SWITCH SIM_ITERS BACK TO 100\\n\\n')\r\nsim_burn = 200 # number of burn-in (needed for cal_opt 2)\r\n\r\n# Simulation output filepath\r\noutput_sim_fp = output_filepath + 'simulations/'\r\n# Simulation output statistics (can include 'mean', 'std', '2.5%', '25%', 'median', '75%', '97.5%')\r\nsim_stat_cns = ['mean', 'std']\r\n# Bias adjustment options (0: no adjustment, 1: new prec scheme and temp from HH2015, 2: HH2015 methods)\r\noption_bias_adjustment = 1\r\n\r\n# %% ===== CALIBRATION OPTIONS =====\r\n# Calibration option ('MCMC', 'HH2015', 'HH2015mod')\r\n# option_calibration = 'MCMC'\r\n# option_calibration = 'HH2015'\r\noption_calibration = 'HH2015mod'\r\n# option_calibration = 'emulator'\r\n# Calibration datasets ('shean', 'larsen', 'mcnabb', 'wgms_d', 'wgms_ee', 'group')\r\ncal_datasets = ['shean']\r\n# cal_datasets = ['shean']\r\n# Calibration output filepath\r\noutput_fp_cal = output_filepath + 'cal_' + option_calibration + '/'\r\n\r\nif option_calibration == 'HH2015':\r\n tbias_init = 0\r\n tbias_step = 1\r\n kp_init = 1.5\r\n kp_bndlow = 0.8\r\n kp_bndhigh = 2\r\n ddfsnow_init = 0.003\r\n ddfsnow_bndlow = 0.00175\r\n ddfsnow_bndhigh = 0.0045\r\n\r\nelif option_calibration == 'HH2015mod':\r\n # Initial parameters\r\n tbias_init = 0\r\n tbias_step = 0.5\r\n kp_init = 1\r\n kp_bndlow = 0.5\r\n kp_bndhigh = 5\r\n ddfsnow_init = 0.0041\r\n # Minimization details\r\n method_opt = 'SLSQP' # SciPy optimization scheme ('SLSQP' or 'L-BFGS-B')\r\n params2opt = ['tbias', 'kp']\r\n ftol_opt = 1e-3 # tolerance for SciPy optimization scheme\r\n eps_opt = 0.01 # epsilon (adjust variables for jacobian) for SciPy optimization scheme (1e-6 works)\r\n\r\nelif option_calibration == 'emulator':\r\n emulator_sims = 10000 # Number of simulations to develop the emulator\r\n tbias_step = 1 # tbias step size\r\n tbias_init = 0 # tbias initial value\r\n kp_init = 1 # kp initial value\r\n ddfsnow_init = 0.0041 # ddfsnow initial value\r\n # Distributions\r\n tbias_disttype = 'truncnormal' # Temperature bias distribution ('truncnormal', 'uniform')\r\n tbias_sigma = 3 # tbias standard deviation for truncnormal distribution\r\n kp_gamma_alpha = 2 # Precipitation factor gamma distribution alpha\r\n kp_gamma_beta = 1 # Precipitation factor gamma distribution beta\r\n ddfsnow_disttype = 'truncnormal' # Degree-day factor of snow distribution ('truncnormal')\r\n ddfsnow_mu = 0.0041 # ddfsnow mean\r\n ddfsnow_sigma = 0.0015 # ddfsnow standard deviation\r\n ddfsnow_bndlow = 0 # ddfsnow lower bound\r\n ddfsnow_bndhigh = np.inf # ddfsnow upper bound\r\n\r\nelif option_calibration == 'MCMC':\r\n # Chain options\r\n n_chains = 1 # number of chains (min 1, max 3)\r\n mcmc_sample_no = 10 # number of steps (10000 was found to be sufficient in HMA)\r\n mcmc_burn_no = 0 # number of steps to burn-in (0 records all steps in chain)\r\n mcmc_step = None # step option (None or 'am')\r\n thin_interval = 1 # thin interval if need to reduce file size (best to leave at 1 if space allows)\r\n\r\n # Degree-day factor of snow distribution options\r\n ddfsnow_disttype = 'truncnormal' # distribution type ('truncnormal', 'uniform')\r\n ddfsnow_mu = 0.0041\r\n ddfsnow_sigma = 0.0015\r\n ddfsnow_bndlow = 0\r\n ddfsnow_bndhigh = np.inf\r\n ddfsnow_start = ddfsnow_mu\r\n\r\n # Precipitation factor distribution options\r\n kp_disttype = 'gamma' # distribution type ('gamma', 'lognormal', 'uniform')\r\n kp_gamma_region_dict_fullfn = None\r\n # kp_gamma_region_dict_fullfn = main_directory + '/../Output/precfactor_gamma_region_dict.csv'\r\n if 
kp_gamma_region_dict_fullfn is not None:\r\n assert os.path.exists(kp_gamma_region_dict_fullfn), ('Using option_calibration: ' + option_calibration +\r\n '. Precfactor regional dictionary does not exist.')\r\n kp_gamma_region_df = pd.read_csv(kp_gamma_region_dict_fullfn)\r\n kp_gamma_region_dict = dict(zip(kp_gamma_region_df.Region.values,\r\n [[kp_gamma_region_df.loc[x, 'alpha'], kp_gamma_region_df.loc[x, 'beta']]\r\n for x in kp_gamma_region_df.index.values]))\r\n else:\r\n kp_gamma_alpha = 9\r\n kp_gamma_beta = 4\r\n kp_lognorm_mu = 0\r\n kp_lognorm_tau = 4\r\n kp_mu = 0\r\n kp_sigma = 1.5\r\n kp_bndlow = 0.5\r\n kp_bndhigh = 1.5\r\n kp_start = 1\r\n\r\n # Temperature bias distribution options\r\n tbias_disttype = 'normal' # distribution type ('normal', 'truncnormal', 'uniform')\r\n tbias_norm_region_dict_fullfn = None\r\n # tbias_norm_region_dict_fullfn = main_directory + '/../Output/tempchange_norm_region_dict.csv'\r\n if tbias_norm_region_dict_fullfn is not None:\r\n assert os.path.exists(tbias_norm_region_dict_fullfn), ('Using option_calibration: ' + option_calibration +\r\n '. Tempbias regional dictionary does not exist.')\r\n tbias_norm_region_df = pd.read_csv(tbias_norm_region_dict_fullfn)\r\n tbias_norm_region_dict = dict(zip(tbias_norm_region_df.Region.values,\r\n [[tbias_norm_region_df.loc[x, 'mu'], tbias_norm_region_df.loc[x, 'sigma']]\r\n for x in tbias_norm_region_df.index.values]))\r\n else:\r\n tbias_mu = 0\r\n tbias_sigma = 1\r\n tbias_bndlow = -10\r\n tbias_bndhigh = 10\r\n tbias_start = tbias_mu\r\n\r\n# %% ===== MODEL PARAMETERS =====\r\nuse_calibrated_modelparams = True # False: use input values, True: use calibrated model parameters\r\n# print('\\nWARNING: using non-calibrated model parameters\\n')\r\nkp = 1 # precipitation factor [-] (k_p in Radic etal 2013; c_prec in HH2015)\r\nprecgrad = 0.0001 # precipitation gradient on glacier [m-1]\r\nddfsnow = 0.0041 # degree-day factor of snow [m w.e. d-1 degC-1]\r\nddfsnow_iceratio = 0.7 # Ratio degree-day factor snow snow to ice\r\nif ddfsnow_iceratio != 0.7:\r\n print('\\n\\n Warning: ddfsnow_iceratio is', ddfsnow_iceratio, '\\n\\n')\r\nddfice = ddfsnow / ddfsnow_iceratio # degree-day factor of ice [m w.e. 
d-1 degC-1]\r\ntbias = 0 # temperature bias [deg C]\r\nlrgcm = -0.0065 # lapse rate from gcm to glacier [K m-1]\r\nlrglac = -0.0065 # lapse rate on glacier for bins [K m-1]\r\ntsnow_threshold = 1.0 # temperature threshold for snow [deg C] (HH2015 used 1.5 degC +/- 1 degC)\r\nfrontalablation_k = 2 # frontal ablation rate [yr-1]\r\naf = 0.7 # Bulk flow parameter for frontal ablation (m^-0.5)\r\noption_frontalablation_k = 1 # Calving option (1: values from HH2015,\r\n# 2: calibrate glaciers independently, use transfer fxns for others)\r\nfrontalablation_k0dict = {1: 3.4, # Calving dictionary with values from HH2015\r\n 2: 0, 3: 0.2, 4: 0.2, 5: 0.5, 6: 0.3, 7: 0.5, 8: 0, 9: 0.2, 10: 0, 11: 0, 12: 0, 13: 0, 14: 0,\r\n 15: 0, 16: 0,\r\n 17: 6, 18: 0, 19: 1}\r\n\r\n# Calving width dictionary to override RGI elevation bins, which can be highly inaccurate at the calving front\r\nwidth_calving_dict_fullfn = main_directory + '/../Calving_data/calvingfront_widths.csv'\r\nif os.path.exists(width_calving_dict_fullfn):\r\n width_calving_df = pd.read_csv(width_calving_dict_fullfn)\r\n width_calving_dict = dict(zip(width_calving_df.RGIId, width_calving_df.front_width_m))\r\nelse:\r\n width_calving_dict = {}\r\n\r\n## Calving parameter dictionary (according to Supplementary Table 3 in HH2015)\r\n# frontalablation_k0dict_fullfn = main_directory + '/../Calving_data/frontalablation_k0_dict.csv'\r\n# if os.path.exists(frontalablation_k0dict_fullfn):\r\n# frontalablation_k0dict_df = pd.read_csv(frontalablation_k0dict_fullfn)\r\n# frontalablation_k0dict = dict(zip(frontalablation_k0dict_df.O1Region, frontalablation_k0dict_df.k0))\r\n# else:\r\n# frontalablation_k0dict = None\r\n\r\n# Calving glacier data\r\ncalving_data_fullfn = main_directory + '/../Calving_data/calving_glacier_data.csv'\r\n\r\n# Model parameter column names and filepaths\r\nmodelparams_colnames = ['lrgcm', 'lrglac', 'precfactor', 'precgrad', 'ddfsnow', 'ddfice', 'tempsnow', 'tempchange']\r\n# Model parameter filepath\r\nmodelparams_fp = output_filepath + 'cal_' + option_calibration + '/'\r\n# modelparams_fp = output_filepath + 'cal_opt2_spc_20190806/'\r\n\r\n# %% ===== MASS BALANCE MODEL OPTIONS =====\r\n# Initial surface type options\r\noption_surfacetype_initial = 1\r\n# option 1 (default) - use median elevation to classify snow/firn above the median and ice below.\r\n# > Sakai et al. (2015) found that the decadal ELAs are consistent with the median elevation of nine glaciers in High\r\n# Mountain Asia, and Nuimura et al. (2015) also found that the snow line altitude of glaciers in China corresponded\r\n# well with the median elevation. 
Therefore, the use of the median elevation for defining the initial surface type\r\n# appears to be a fairly reasonable assumption in High Mountain Asia.\r\n# option 2 - use mean elevation\r\n# option 3 (Need to code) - specify an AAR ratio and apply this to estimate initial conditions\r\ninclude_firn = True # True: firn included, False: firn is modeled as snow\r\ninclude_debris = True # True: account for debris with melt factors, False: do not account for debris\r\n\r\n# Downscaling model options\r\n# Reference elevation options for downscaling climate variables\r\noption_elev_ref_downscale = 'Zmed' # 'Zmed', 'Zmax', or 'Zmin' for median, maximum or minimum glacier elevations\r\n# Downscale temperature to bins options\r\noption_temp2bins = 1 # 1: lr_gcm and lr_glac to adjust temp from gcm to the glacier bins\r\noption_adjusttemp_surfelev = 1 # 1: adjust temps based on surface elev changes; 0: no adjustment\r\n# Downscale precipitation to bins options\r\noption_prec2bins = 1 # 1: prec_factor and prec_grad to adjust precip from gcm to the glacier bins\r\noption_preclimit = 0 # 1: limit the uppermost 25% using an expontial fxn\r\n\r\n# Accumulation model options\r\noption_accumulation = 2 # 1: single threshold, 2: threshold +/- 1 deg using linear interpolation\r\n\r\n# Ablation model options\r\noption_ablation = 1 # 1: monthly temp, 2: superimposed daily temps enabling melt near 0 (HH2015)\r\noption_ddf_firn = 1 # 0: ddf_firn = ddf_snow; 1: ddf_firn = mean of ddf_snow and ddf_ice\r\nddfdebris = ddfice # add options for handling debris-covered glaciers\r\n\r\n# Refreezing model options\r\n# option_refreezing = 'HH2015' # HH2015: heat conduction (Huss and Hock, 2015)\r\noption_refreezing = 'Woodward' # Woodward: annual air temp (Woodward etal 1997)\r\nif option_refreezing == 'HH2015':\r\n rf_layers = 5 # number of layers for refreezing model (8 is sufficient - Matthias)\r\n # rf_layers_max = 8 # number of layers to include for refreeze calculation\r\n rf_dz = 10 / rf_layers # layer thickness (m)\r\n rf_dsc = 3 # number of time steps for numerical stability (3 is sufficient - Matthias)\r\n rf_meltcrit = 0.002 # critical amount of melt [m w.e.] 
for initializing refreezing module\r\n pp = 0.3 # additional refreeze water to account for water refreezing at bare-ice surface\r\n rf_dens_top = 300 # snow density at surface (kg m-3)\r\n rf_dens_bot = 650 # snow density at bottom refreezing layer (kg m-3)\r\n option_rf_limit_meltsnow = 1\r\n\r\nelif option_refreezing == 'Woodward':\r\n rf_month = 10 # refreeze month\r\n\r\n# Mass redistribution / Glacier geometry change options\r\noption_massredistribution = 1 # 1: mass redistribution (Huss and Hock, 2015)\r\noption_glaciershape = 1 # 1: parabolic (Huss and Hock, 2015), 2: rectangular, 3: triangular\r\noption_glaciershape_width = 1 # 1: include width, 0: do not include\r\nicethickness_advancethreshold = 5 # advancing glacier ice thickness change threshold (5 m in Huss and Hock, 2015)\r\nterminus_percentage = 20 # glacier (%) considered terminus (20% in HH2015), used to size advancing new bins\r\n\r\n# %% CLIMATE DATA\r\n# ERA-INTERIM (Reference data)\r\n# Variable names\r\nera_varnames = ['temperature', 'precipitation', 'geopotential', 'temperature_pressurelevels']\r\n# Note: do not change variable names as these are set to run with the download_erainterim_data.py script.\r\n# If option 2 is being used to calculate the lapse rates, then the pressure level data is unnecessary.\r\n# Dates\r\neraint_start_date = '19790101'\r\neraint_end_date = '20180501'\r\n# Resolution\r\ngrid_res = '0.5/0.5'\r\n# Bounding box (N/W/S/E)\r\n# bounding_box = '90/0/-90/360'\r\nbounding_box = '50/60/25/105'\r\n# Lapse rate option\r\n# option 0 - lapse rates are constant defined by input\r\n# option 1 (default) - lapse rates derived from gcm pressure level temperature data (varies spatially and temporally)\r\n# option 2 - lapse rates derived from surrounding pixels (varies spatially and temporally)\r\n# Note: Be careful with option 2 as the ocean vs land/glacier temperatures can cause unrealistic inversions\r\n# This is the option used by Marzeion et al. 
(2012)\r\noption_lr_method = 1\r\n\r\n# ERA5\r\nif ref_gcm_name == 'ERA5':\r\n era5_fp = main_directory + '/../Climate_data/ERA5/'\r\n era5_temp_fn = 'ERA5_temp_monthly.nc'\r\n era5_tempstd_fn = 'ERA5_tempstd_monthly.nc'\r\n era5_prec_fn = 'ERA5_totalprecip_monthly.nc'\r\n era5_elev_fn = 'ERA5_geopotential.nc'\r\n era5_pressureleveltemp_fn = 'ERA5_pressureleveltemp_monthly.nc'\r\n era5_lr_fn = 'ERA5_lapserates_monthly.nc'\r\n assert os.path.exists(era5_fp), 'ERA5 filepath does not exist'\r\n assert os.path.exists(era5_fp + era5_temp_fn), 'ERA5 temperature filepath does not exist'\r\n assert os.path.exists(era5_fp + era5_prec_fn), 'ERA5 precipitation filepath does not exist'\r\n assert os.path.exists(era5_fp + era5_elev_fn), 'ERA5 elevation data does not exist'\r\n assert os.path.exists(era5_fp + era5_lr_fn), 'ERA5 lapse rate data does not exist'\r\n if option_ablation == 2:\r\n assert os.path.exists(era5_fp + era5_tempstd_fn), 'ERA5 temperature std filepath does not exist'\r\n\r\n# ERA-Interim\r\nelif ref_gcm_name == 'ERA-Interim':\r\n eraint_fp = main_directory + '/workspace/Climate_data/ERA_Interim/1/'\r\n eraint_temp_fn = 'ERAInterim_Temp2m_DailyMeanMonthly_' + eraint_start_date + '_' + eraint_end_date + '.nc'\r\n eraint_prec_fn = 'ERAInterim_TotalPrec_DailyMeanMonthly_' + eraint_start_date + '_' + eraint_end_date + '.nc'\r\n eraint_elev_fn = 'ERAInterim_geopotential.nc'\r\n eraint_pressureleveltemp_fn = 'ERAInterim_pressureleveltemp_' + eraint_start_date + '_' + eraint_end_date + '.nc'\r\n eraint_lr_fn = ('ERAInterim_lapserates_' + eraint_start_date + '_' + eraint_end_date + '_opt' +\r\n str(option_lr_method) + '_world.nc')\r\n assert os.path.exists(eraint_fp), 'ERA-Interim filepath does not exist'\r\n assert os.path.exists(eraint_temp_fn), 'ERA-Interim temperature filepath does not exist'\r\n assert os.path.exists(eraint_prec_fn), 'ERA-Interim precipitation filepath does not exist'\r\n assert os.path.exists(eraint_elev_fn), 'ERA-Interim elevation data does not exist'\r\n assert os.path.exists(eraint_lr_fn), 'ERA-Interim lapse rate data does not exist'\r\n if option_ablation == 2:\r\n assert 0 == 1, 'ERA-Interim not set up to use option_ablation 2 (temperature std data not downloaded)'\r\n\r\n# CMIP5 (GCM data)\r\ncmip5_fp_var_prefix = main_directory + '/../Climate_data/cmip5/'\r\ncmip5_fp_var_ending = '_r1i1p1_monNG/'\r\ncmip5_fp_fx_prefix = main_directory + '/../Climate_data/cmip5/'\r\ncmip5_fp_fx_ending = '_r0i0p0_fx/'\r\ncmip5_fp_lr = main_directory + '/../Climate_data/cmip5/bias_adjusted_1995_2100/2018_0524/'\r\ncmip5_lr_fn = 'biasadj_mon_lravg_1995_2015_R15.csv'\r\n\r\n# COAWST (High-resolution climate data over HMA)\r\ncoawst_fp_unmerged = main_directory + '/../Climate_data/coawst/Monthly/'\r\ncoawst_fp = main_directory + '/../Climate_data/coawst/'\r\ncoawst_fn_prefix_d02 = 'wrfout_d02_Monthly_'\r\ncoawst_fn_prefix_d01 = 'wrfout_d01_Monthly_'\r\ncoawst_temp_fn_d02 = 'wrfout_d02_Monthly_T2_1999100100-2006123123.nc'\r\ncoawst_prec_fn_d02 = 'wrfout_d02_Monthly_TOTPRECIP_1999100100-2006123123.nc'\r\ncoawst_elev_fn_d02 = 'wrfout_d02_Monthly_HGHT.nc'\r\ncoawst_temp_fn_d01 = 'wrfout_d01_Monthly_T2_1999100100-2006123123.nc'\r\ncoawst_prec_fn_d01 = 'wrfout_d01_Monthly_TOTPRECIP_1999100100-2006123123.nc'\r\ncoawst_elev_fn_d01 = 'wrfout_d01_Monthly_HGHT.nc'\r\ncoawst_vns = ['T2', 'TOTPRECIP', 'HGHT']\r\ncoawst_d02_lon_min = 65\r\ncoawst_d02_lon_max = 99\r\ncoawst_d02_lat_min = 20\r\ncoawst_d02_lat_max = 38\r\n\r\n# %% GLACIER DATA (RGI, ICE THICKNESS, ETC.)\r\n# ===== RGI DATA =====\r\n# 
Filepath for RGI files\r\nrgi_fp = main_directory + '/../RGI/rgi60/00_rgi60_attribs/'\r\nassert os.path.exists(rgi_fp), 'RGI filepath does not exist. PyGEM requires RGI data to run.'\r\n# Column names\r\nrgi_lat_colname = 'CenLat'\r\nrgi_lon_colname = 'CenLon_360' # REQUIRED OTHERWISE GLACIERS IN WESTERN HEMISPHERE USE 0 deg\r\nelev_colname = 'elev'\r\nindexname = 'GlacNo'\r\nrgi_O1Id_colname = 'glacno'\r\nrgi_glacno_float_colname = 'RGIId_float'\r\n# Column names from table to drop\r\nrgi_cols_drop = ['GLIMSId', 'BgnDate', 'EndDate', 'Status', 'Connect', 'Linkages', 'Name']\r\n# rgi_cols_drop = []\r\n\r\n# ===== ADDITIONAL DATA (hypsometry, ice thickness, width) =====\r\nh_consensus_fp = main_directory + '/../IceThickness_Farinotti/composite_thickness_RGI60-all_regions/'\r\n# Filepath for the hypsometry files\r\nbinsize = 10 # Elevation bin height [m]\r\n# hyps_data = 'Huss' # Hypsometry dataset (GlacierMIP; Hock etal 2019)\r\n# hyps_data = 'Farinotti' # Hyspsometry dataset (Farinotti etal 2019)\r\nhyps_data = 'OGGM' # Hypsometry dataset (OGGM; Maussion etal 2019)\r\n\r\n# Data from Farinotti et al. (2019): Consensus ice thickness estimates\r\nif hyps_data == 'Farinotti':\r\n option_shift_elevbins_20m = 0 # option to shift bins by 20 m (needed since off by 20 m, seem email 5/24/2018)\r\n # Dictionary of hypsometry filenames\r\n hyps_filepath = main_directory + '/../IceThickness_Farinotti/output/'\r\n hyps_filedict = {1: 'area_km2_01_Farinotti2019_10m.csv',\r\n 13: 'area_km2_13_Farinotti2019_10m.csv',\r\n 14: 'area_km2_14_Farinotti2019_10m.csv',\r\n 15: 'area_km2_15_Farinotti2019_10m.csv'}\r\n hyps_colsdrop = ['RGIId']\r\n # Thickness data\r\n thickness_filepath = main_directory + '/../IceThickness_Farinotti/output/'\r\n thickness_filedict = {1: 'thickness_m_01_Farinotti2019_10m.csv',\r\n 13: 'thickness_m_13_Farinotti2019_10m.csv',\r\n 14: 'thickness_m_14_Farinotti2019_10m.csv',\r\n 15: 'thickness_m_15_Farinotti2019_10m.csv'}\r\n thickness_colsdrop = ['RGIId']\r\n # Width data\r\n width_filepath = main_directory + '/../IceThickness_Farinotti/output/'\r\n width_filedict = {1: 'width_km_01_Farinotti2019_10m.csv',\r\n 13: 'width_km_13_Farinotti2019_10m.csv',\r\n 14: 'width_km_14_Farinotti2019_10m.csv',\r\n 15: 'width_km_15_Farinotti2019_10m.csv'}\r\n width_colsdrop = ['RGIId']\r\n# Data from GlacierMIP\r\nelif hyps_data == 'Huss':\r\n option_shift_elevbins_20m = 1 # option to shift bins by 20 m (needed since off by 20 m, seem email 5/24/2018)\r\n # Dictionary of hypsometry filenames\r\n # (Files from Matthias Huss should be manually pre-processed to be 'RGI-ID', 'Cont_range', and bins starting at 5)\r\n hyps_filepath = main_directory + '/../IceThickness_Huss/bands_10m_DRR/'\r\n hyps_filedict = {\r\n 1: 'area_01_Huss_Alaska_10m.csv',\r\n 3: 'area_RGI03_10.csv',\r\n 4: 'area_RGI04_10.csv',\r\n 6: 'area_RGI06_10.csv',\r\n 7: 'area_RGI07_10.csv',\r\n 8: 'area_RGI08_10.csv',\r\n 9: 'area_RGI09_10.csv',\r\n 13: 'area_13_Huss_CentralAsia_10m.csv',\r\n 14: 'area_14_Huss_SouthAsiaWest_10m.csv',\r\n 15: 'area_15_Huss_SouthAsiaEast_10m.csv',\r\n 16: 'area_16_Huss_LowLatitudes_10m.csv',\r\n 17: 'area_17_Huss_SouthernAndes_10m.csv'}\r\n hyps_colsdrop = ['RGI-ID', 'Cont_range']\r\n # Thickness data\r\n thickness_filepath = main_directory + '/../IceThickness_Huss/bands_10m_DRR/'\r\n thickness_filedict = {\r\n 1: 'thickness_01_Huss_Alaska_10m.csv',\r\n 3: 'thickness_RGI03_10.csv',\r\n 4: 'thickness_RGI04_10.csv',\r\n 6: 'thickness_RGI06_10.csv',\r\n 7: 'thickness_RGI07_10.csv',\r\n 8: 
'thickness_RGI08_10.csv',\r\n 9: 'thickness_RGI09_10.csv',\r\n 13: 'thickness_13_Huss_CentralAsia_10m.csv',\r\n 14: 'thickness_14_Huss_SouthAsiaWest_10m.csv',\r\n 15: 'thickness_15_Huss_SouthAsiaEast_10m.csv',\r\n 16: 'thickness_16_Huss_LowLatitudes_10m.csv',\r\n 17: 'thickness_17_Huss_SouthernAndes_10m.csv'}\r\n thickness_colsdrop = ['RGI-ID', 'Cont_range']\r\n # Width data\r\n width_filepath = main_directory + '/../IceThickness_Huss/bands_10m_DRR/'\r\n width_filedict = {\r\n 1: 'width_01_Huss_Alaska_10m.csv',\r\n 3: 'width_RGI03_10.csv',\r\n 4: 'width_RGI04_10.csv',\r\n 6: 'width_RGI06_10.csv',\r\n 7: 'width_RGI07_10.csv',\r\n 8: 'width_RGI08_10.csv',\r\n 9: 'width_RGI09_10.csv',\r\n 13: 'width_13_Huss_CentralAsia_10m.csv',\r\n 14: 'width_14_Huss_SouthAsiaWest_10m.csv',\r\n 15: 'width_15_Huss_SouthAsiaEast_10m.csv',\r\n 16: 'width_16_Huss_LowLatitudes_10m.csv',\r\n 17: 'width_17_Huss_SouthernAndes_10m.csv'}\r\n width_colsdrop = ['RGI-ID', 'Cont_range']\r\nelif hyps_data == 'OGGM':\r\n oggm_gdir_fp = main_directory + '/../oggm_gdirs/'\r\n overwrite_gdirs = False\r\n\r\n# Debris datasets\r\nif include_debris:\r\n debris_fp = main_directory + '/../debris_data/'\r\n assert os.path.exists(debris_fp), 'Debris filepath does not exist. Turn off include_debris or add filepath.'\r\nelse:\r\n debris_fp = None\r\n\r\n# %% MODEL TIME FRAME DATA\r\n# Models require complete data for each year such that refreezing, scaling, etc. can be calculated\r\n# Leap year option\r\noption_leapyear = 0 # 1: include leap year days, 0: exclude leap years so February always has 28 days\r\n# User specified start/end dates\r\n# note: start and end dates must refer to whole years\r\nstartmonthday = '06-01'\r\nendmonthday = '05-31'\r\nwateryear_month_start = 10 # water year starting month\r\nwinter_month_start = 10 # first month of winter (for HMA winter is October 1 - April 30)\r\nsummer_month_start = 5 # first month of summer (for HMA summer is May 1 - Sept 30)\r\noption_dates = 1 # 1: use dates from date table (first of each month), 2: dates from climate data\r\ntimestep = 'monthly' # time step ('monthly' only option at present)\r\n\r\n# Seasonal dictionaries for WGMS data that is not provided\r\nlat_threshold = 75\r\n# Winter (start/end) and Summer (start/end)\r\nmonthdict = {'northernmost': [9, 5, 6, 8],\r\n 'north': [10, 4, 5, 9],\r\n 'south': [4, 9, 10, 3],\r\n 'southernmost': [3, 10, 11, 2]}\r\n# Latitude threshold\r\n# 01 - Alaska - < 75\r\n# 02 - W Can - < 75\r\n# 03 - N Can - > 74\r\n# 04 - S Can - < 74\r\n# 05 - Greenland - 60 - 80\r\n# 06 - Iceland - < 75\r\n# 07 - Svalbard - 70 - 80\r\n# 08 - Scandinavia - < 70\r\n# 09 - Russia - 72 - 82\r\n# 10 - N Asia - 46 - 77\r\n\r\n\r\n# %% CALIBRATION DATASETS\r\nmb_binned_fp = main_directory + '/../DEMs/mb_bins_all-20200430/'\r\n\r\n# ===== HUGONNET GEODETIC =====\r\nhugonnet_fp = main_directory + '/../DEMs/Hugonnet2020/'\r\nhugonnet_fn = 'df_pergla_global_20yr.csv'\r\nhugonnet_rgi_glacno_cn = 'rgiid'\r\nhugonnet_mb_cn = 'dmdtda'\r\nhugonnet_mb_err_cn = 'err_dmdtda'\r\nhugonnet_time1_cn = 't1'\r\nhugonnet_time2_cn = 't2'\r\nhugonnet_area_cn = 'area_km2'\r\n\r\n# ===== SHEAN GEODETIC =====\r\nshean_fp = main_directory + '/../DEMs/Shean_2019_0213/'\r\nshean_fn = 'hma_mb_20190215_0815_std+mean_all_filled_bolch.csv'\r\nshean_rgi_glacno_cn = 'RGIId'\r\nshean_mb_cn = 'mb_mwea'\r\nshean_mb_err_cn = 'mb_mwea_sigma'\r\nshean_time1_cn = 't1'\r\nshean_time2_cn = 't2'\r\nshean_area_cn = 'area_m2'\r\n\r\n# ===== BERTHIER GEODETIC =====\r\nberthier_fp = main_directory + 
'/../DEMs/Berthier/output/'\r\n# berthier_fn = 'AK_all_20190913_wextrapolations_1980cheat.csv'\r\nberthier_fn = 'AK_all_20190913.csv'\r\nberthier_rgi_glacno_cn = 'RGIId'\r\nberthier_mb_cn = 'mb_mwea'\r\nberthier_mb_err_cn = 'mb_mwea_sigma'\r\nberthier_time1_cn = 't1'\r\nberthier_time2_cn = 't2'\r\nberthier_area_cn = 'area_km2'\r\n\r\n# ===== BRAUN GEODETIC =====\r\nbraun_fp = main_directory + '/../DEMs/Braun/output/'\r\nbraun_fn = 'braun_AK_all_20190924_wlarsen_mcnabb_best.csv'\r\n# braun_fn = 'braun_AK_all_20190924_wextrapolations.csv'\r\n# braun_fn = 'braun_AK_all_20190924.csv'\r\nbraun_rgi_glacno_cn = 'RGIId'\r\nbraun_mb_cn = 'mb_mwea'\r\nbraun_mb_err_cn = 'mb_mwea_sigma'\r\nbraun_time1_cn = 't1'\r\nbraun_time2_cn = 't2'\r\nbraun_area_cn = 'area_km2'\r\n\r\n# ===== BRUN GEODETIC =====\r\nbrun_fp = main_directory + '/../DEMs/'\r\nbrun_fn = 'Brun_Nature2017_MB_glacier-wide.csv'\r\nbrun_rgi_glacno_cn = 'GLA_ID'\r\nbrun_mb_cn = 'MB [m w.a a-1]'\r\nbrun_mb_err_cn = 'err. on MB [m w.e a-1]'\r\n# NEED TO FINISH SETTING UP BRUN WITH CLASS_MBDATA\r\n\r\n# ===== MAUER GEODETIC =====\r\nmauer_fp = main_directory + '/../DEMs/'\r\nmauer_fn = 'Mauer_geoMB_HMA_1970s_2000_min80pctCov.csv'\r\nmauer_rgi_glacno_cn = 'RGIId'\r\nmauer_mb_cn = 'geoMassBal'\r\nmauer_mb_err_cn = 'geoMassBalSig'\r\nmauer_time1_cn = 't1'\r\nmauer_time2_cn = 't2'\r\n\r\n# ===== MCNABB GEODETIC =====\r\nmcnabb_fp = main_directory + '/../DEMs/McNabb_data/wgms_dv/'\r\nmcnabb_fn = 'McNabb_data_all_preprocessed.csv'\r\nmcnabb_rgiid_cn = 'RGIId'\r\nmcnabb_mb_cn = 'mb_mwea'\r\nmcnabb_mb_err_cn = 'mb_mwea_sigma'\r\nmcnabb_time1_cn = 'date0'\r\nmcnabb_time2_cn = 'date1'\r\nmcnabb_area_cn = 'area'\r\n\r\n# ===== LARSEN GEODETIC =====\r\nlarsen_fp = main_directory + '/../DEMs/larsen/'\r\nlarsen_fn = 'larsen2015_supplementdata_wRGIIds_v3.csv'\r\nlarsen_rgiid_cn = 'RGIId'\r\nlarsen_mb_cn = 'mb_mwea'\r\nlarsen_mb_err_cn = 'mb_mwea_sigma'\r\nlarsen_time1_cn = 'date0'\r\nlarsen_time2_cn = 'date1'\r\nlarsen_area_cn = 'area'\r\n\r\n# ===== WGMS =====\r\nwgms_datasets = ['wgms_d', 'wgms_ee']\r\n# wgms_datasets = ['wgms_d']\r\nwgms_fp = main_directory + '/../WGMS/DOI-WGMS-FoG-2018-06/'\r\nwgms_rgi_glacno_cn = 'glacno'\r\nwgms_obs_type_cn = 'obs_type'\r\n# WGMS lookup tables information\r\nwgms_lookup_fn = 'WGMS-FoG-2018-06-AA-GLACIER-ID-LUT.csv'\r\nrgilookup_fullfn = main_directory + '/../RGI/rgi60/00_rgi60_links/00_rgi60_links.csv'\r\nrgiv6_fn_prefix = main_directory + '/../RGI/rgi60/00_rgi60_attribs/' + '*'\r\nrgiv5_fn_prefix = main_directory + '/../RGI/00_rgi50_attribs/' + '*'\r\n\r\n# WGMS (d) geodetic mass balance information\r\nwgms_d_fn = 'WGMS-FoG-2018-06-D-CHANGE.csv'\r\nwgms_d_fn_preprocessed = 'wgms_d_rgiv6_preprocessed.csv'\r\nwgms_d_thickness_chg_cn = 'THICKNESS_CHG'\r\nwgms_d_thickness_chg_err_cn = 'THICKNESS_CHG_UNC'\r\nwgms_d_volume_chg_cn = 'VOLUME_CHANGE'\r\nwgms_d_volume_chg_err_cn = 'VOLUME_CHANGE_UNC'\r\nwgms_d_z1_cn = 'LOWER_BOUND'\r\nwgms_d_z2_cn = 'UPPER_BOUND'\r\n\r\n# WGMS (e/ee) glaciological mass balance information\r\nwgms_e_fn = 'WGMS-FoG-2018-06-E-MASS-BALANCE-OVERVIEW.csv'\r\nwgms_ee_fn = 'WGMS-FoG-2018-06-EE-MASS-BALANCE.csv'\r\nwgms_ee_fn_preprocessed = 'wgms_ee_rgiv6_preprocessed.csv'\r\nwgms_ee_mb_cn = 'BALANCE'\r\nwgms_ee_mb_err_cn = 'BALANCE_UNC'\r\nwgms_ee_t1_cn = 'YEAR'\r\nwgms_ee_z1_cn = 'LOWER_BOUND'\r\nwgms_ee_z2_cn = 'UPPER_BOUND'\r\nwgms_ee_period_cn = 'period'\r\n\r\n# ===== COGLEY DATA =====\r\ncogley_fp = main_directory + '/../Calibration_datasets/'\r\ncogley_fn_preprocessed = 
'Cogley_Arctic_processed_wInfo.csv'\r\ncogley_rgi_glacno_cn = 'glacno'\r\ncogley_mass_chg_cn = 'geo_mass_kgm2a'\r\ncogley_mass_chg_err_cn = 'geo_mass_unc'\r\ncogley_z1_cn = 'Zmin'\r\ncogley_z2_cn = 'Zmax'\r\ncogley_obs_type_cn = 'obs_type'\r\n\r\n# ===== REGIONAL DATA =====\r\n# Regional data refers to all measurements that have lumped multiple glaciers together\r\n# - a dictionary linking the regions to RGIIds is required\r\nmb_group_fp = main_directory + '/../Calibration_datasets/'\r\nmb_group_dict_fn = 'mb_group_dict.csv'\r\nmb_group_data_fn = 'mb_group_data.csv'\r\nmb_group_t1_cn = 'begin_period'\r\nmb_group_t2_cn = 'end_period'\r\n\r\n# %% REGIONS\r\ngrouping = None\r\n# grouping = 'himap'\r\nif grouping == 'watershed':\r\n reg_vn = 'watershed'\r\n reg_dict_fn = main_directory + '/../qgis_himat/rgi60_HMA_dict_watershed.csv'\r\n reg_csv = pd.read_csv(reg_dict_fn)\r\n reg_dict = dict(zip(reg_csv.RGIId, reg_csv[reg_vn]))\r\nelif grouping == 'kaab':\r\n reg_vn = 'kaab_name'\r\n reg_dict_fn = main_directory + '/../qgis_himat/rgi60_HMA_dict_kaab.csv'\r\n reg_csv = pd.read_csv(reg_dict_fn)\r\n reg_dict = dict(zip(reg_csv.RGIId, reg_csv[reg_vn]))\r\nelif grouping == 'himap':\r\n reg_vn = 'bolch_name'\r\n reg_dict_fn = main_directory + '/../qgis_himat/rgi60_HMA_dict_bolch.csv'\r\n reg_csv = pd.read_csv(reg_dict_fn)\r\n reg_dict = dict(zip(reg_csv.RGIId, reg_csv[reg_vn]))\r\nelse:\r\n reg_dict = {}\r\n\r\n# %% OUTPUT OPTIONS\r\n# Output package\r\n# option 0 - no netcdf package\r\n# option 1 - \"raw package\" [preferred units: m w.e.]\r\n# monthly variables for each bin (temp, prec, acc, refreeze, snowpack, melt, frontalablation,\r\n# massbal_clim)\r\n# annual variables for each bin (area, icethickness, surfacetype)\r\n# option 2 - \"Glaciologist Package\" output [units: m w.e. 
unless otherwise specified]:\r\n# monthly glacier-wide variables (prec, acc, refreeze, melt, frontalablation, massbal_total, runoff,\r\n# snowline)\r\n# annual glacier-wide variables (area, volume, ELA)\r\noutput_package = 2\r\noutput_glacier_attr_vns = ['glacno', 'RGIId_float', 'CenLon', 'CenLat', 'O1Region', 'O2Region', 'Area', 'Zmin', 'Zmax',\r\n 'Zmed', 'Slope', 'Aspect', 'Lmax', 'Form', 'TermType', 'Surging']\r\ntime_names = ['time', 'year', 'year_plus1']\r\n# Output package variables\r\noutput_variables_package2 = ['temp_glac_monthly', 'prec_glac_monthly', 'acc_glac_monthly',\r\n 'refreeze_glac_monthly', 'melt_glac_monthly', 'frontalablation_glac_monthly',\r\n 'massbaltotal_glac_monthly', 'runoff_glac_monthly', 'snowline_glac_monthly',\r\n 'area_glac_annual', 'volume_glac_annual', 'ELA_glac_annual',\r\n 'offglac_prec_monthly', 'offglac_refreeze_monthly', 'offglac_melt_monthly',\r\n 'offglac_snowpack_monthly', 'offglac_runoff_monthly']\r\n\r\n# %% MODEL PROPERTIES\r\ndensity_ice = 900 # Density of ice [kg m-3] (or Gt / 1000 km3)\r\ndensity_water = 1000 # Density of water [kg m-3]\r\narea_ocean = 362.5 * 10 ** 6 # Area of ocean [km2]\r\nk_ice = 2.33 # Thermal conductivity of ice [J s-1 K-1 m-1] recall (W = J s-1)\r\nk_air = 0.023 # Thermal conductivity of air [J s-1 K-1 m-1] (Mellor, 1997)\r\n# k_air = 0.001 # Thermal conductivity of air [J s-1 K-1 m-1]\r\nch_ice = 1890000 # Volumetric heat capacity of ice [J K-1 m-3] (density=900, heat_capacity=2100 J K-1 kg-1)\r\nch_air = 1297 # Volumetric Heat capacity of air [J K-1 m-3] (density=1.29, heat_capacity=1005 J K-1 kg-1)\r\nLh_rf = 333550 # Latent heat of fusion [J kg-1]\r\ntolerance = 1e-12 # Model tolerance (used to remove low values caused by rounding errors)\r\ngravity = 9.81 # Gravity [m s-2]\r\npressure_std = 101325 # Standard pressure [Pa]\r\ntemp_std = 288.15 # Standard temperature [K]\r\nR_gas = 8.3144598 # Universal gas constant [J mol-1 K-1]\r\nmolarmass_air = 0.0289644 # Molar mass of Earth's air [kg mol-1]\r\n\r\n# %% DEBUGGING OPTIONS\r\ndebug_refreeze = False\r\ndebug_mb = False\r\n\r\n# Pass variable to shell script\r\nif __name__ == '__main__':\r\n reg_str = ''\r\n for region in rgi_regionsO1:\r\n reg_str += str(region)\r\n print(reg_str)\r\n# print(rgi_glac_number[0:10])\r\n"
] |
[
[
"numpy.arange",
"pandas.read_csv"
]
] |
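The glacier-selection helpers in pygem_input.py above come down to two pieces of arithmetic: zero-padding integer glacier numbers to five digits, and recovering those five digits from a fractional RGIId. A self-contained restatement follows; glac_num_fromrange is copied from the row, and the example RGIId 15.03742 is the glac_no used in the row's settings.

# Self-contained illustration of the glacier-number formatting used above.
import numpy as np

def glac_num_fromrange(int_low, int_high):
    x = np.arange(int_low, int_high + 1).tolist()
    return [str(i).zfill(5) for i in x]

print(glac_num_fromrange(1, 3))                    # ['00001', '00002', '00003']

rgiid = 15.03742                                   # region 15, glacier 3742
glacno = int(round((rgiid % 1) * 10**5))           # 3742, as in get_shean_glacier_nos
print(('%.5f' % (glacno / 10**5)).split('.')[1])   # '03742'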
zsef123/darts-multi_gpu
|
[
"31ab50096b31812ebba542ca0ad9034870a3a586"
] |
[
"cnn/train_search.py"
] |
[
"import os\nimport sys\nimport time\nimport glob\nimport numpy as np\nimport torch\nimport utils\nimport logging\nimport argparse\nimport torch.nn as nn\nimport torch.utils\nimport torch.nn.functional as F\nimport torchvision.datasets as dset\nimport torch.backends.cudnn as cudnn\n\nfrom model_search import Network\nfrom architect import Architect\n\n\nparser = argparse.ArgumentParser(\"cifar\")\nparser.add_argument('--data', type=str, default='../data', help='location of the data corpus')\nparser.add_argument('--batch_size', type=int, default=64, help='batch size')\nparser.add_argument('--learning_rate', type=float, default=0.025, help='init learning rate')\nparser.add_argument('--learning_rate_min', type=float, default=0.001, help='min learning rate')\nparser.add_argument('--momentum', type=float, default=0.9, help='momentum')\nparser.add_argument('--weight_decay', type=float, default=3e-4, help='weight decay')\nparser.add_argument('--report_freq', type=float, default=50, help='report frequency')\nparser.add_argument('--gpu', type=str, default='0', help='gpu device id')\nparser.add_argument('--epochs', type=int, default=50, help='num of training epochs')\nparser.add_argument('--init_channels', type=int, default=16, help='num of init channels')\nparser.add_argument('--layers', type=int, default=8, help='total number of layers')\nparser.add_argument('--model_path', type=str, default='saved_models', help='path to save the model')\nparser.add_argument('--cutout', action='store_true', default=False, help='use cutout')\nparser.add_argument('--cutout_length', type=int, default=16, help='cutout length')\nparser.add_argument('--drop_path_prob', type=float, default=0.3, help='drop path probability')\nparser.add_argument('--save', type=str, default='EXP', help='experiment name')\nparser.add_argument('--seed', type=int, default=2, help='random seed')\nparser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping')\nparser.add_argument('--train_portion', type=float, default=0.5, help='portion of training data')\nparser.add_argument('--unrolled', action='store_true', default=False, help='use one-step unrolled validation loss')\nparser.add_argument('--arch_learning_rate', type=float, default=3e-4, help='learning rate for arch encoding')\nparser.add_argument('--arch_weight_decay', type=float, default=1e-3, help='weight decay for arch encoding')\nargs = parser.parse_args()\n\nargs.save = 'search-{}-{}'.format(args.save, time.strftime(\"%Y%m%d-%H%M%S\"))\nutils.create_exp_dir(args.save, scripts_to_save=glob.glob('*.py'))\n\nlog_format = '%(asctime)s %(message)s'\nlogging.basicConfig(stream=sys.stdout, level=logging.INFO,\n format=log_format, datefmt='%m/%d %I:%M:%S %p')\nfh = logging.FileHandler(os.path.join(args.save, 'log.txt'))\nfh.setFormatter(logging.Formatter(log_format))\nlogging.getLogger().addHandler(fh)\n\nCIFAR_CLASSES = 10\n\n\ndef main():\n if not torch.cuda.is_available():\n logging.info('no gpu device available')\n sys.exit(1)\n\n np.random.seed(args.seed)\n gpus = [int(i) for i in args.gpu.split(',')]\n if len(gpus)==1:\n torch.cuda.set_device(int(args.gpu))\n cudnn.benchmark = True\n torch.manual_seed(args.seed)\n cudnn.enabled=True\n torch.cuda.manual_seed(args.seed)\n logging.info('gpu device = %s' % args.gpu)\n logging.info(\"args = %s\", args)\n\n criterion = nn.CrossEntropyLoss()\n criterion = criterion.cuda()\n model = Network(args.init_channels, CIFAR_CLASSES, args.layers, criterion)\n if len(gpus)>1:\n model = nn.DataParallel(model)\n model = model.cuda()\n \n 
arch_params = list(map(id, model.module.arch_parameters()))\n weight_params = filter(lambda p: id(p) not in arch_params, \n model.parameters())\n\n logging.info(\"param size = %fMB\", utils.count_parameters_in_MB(model))\n\n optimizer = torch.optim.SGD(\n weight_params, #model.parameters(),\n args.learning_rate,\n momentum=args.momentum,\n weight_decay=args.weight_decay)\n\n train_transform, valid_transform = utils._data_transforms_cifar10(args)\n train_data = dset.CIFAR10(root=args.data, train=True, download=True, transform=train_transform)\n\n num_train = len(train_data)\n indices = list(range(num_train))\n split = int(np.floor(args.train_portion * num_train))\n\n train_queue = torch.utils.data.DataLoader(\n train_data, batch_size=args.batch_size,\n sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[:split]),\n pin_memory=True, num_workers=2)\n\n valid_queue = torch.utils.data.DataLoader(\n train_data, batch_size=args.batch_size,\n sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[split:num_train]),\n pin_memory=True, num_workers=2)\n\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(\n optimizer, float(args.epochs), eta_min=args.learning_rate_min)\n\n architect = Architect(model, criterion, args)\n\n for epoch in range(args.epochs):\n scheduler.step()\n lr = scheduler.get_lr()[0]\n logging.info('epoch %d lr %e', epoch, lr)\n\n genotype = model.module.genotype()\n logging.info('genotype = %s', genotype)\n\n print(F.softmax(model.module.alphas_normal, dim=-1))\n print(F.softmax(model.module.alphas_reduce, dim=-1))\n\n # training\n train_acc, train_obj = train(train_queue, valid_queue, model, architect, criterion, optimizer, lr)\n logging.info('train_acc %f', train_acc)\n\n # validation\n with torch.no_grad():\n valid_acc, valid_obj = infer(valid_queue, model, criterion)\n logging.info('valid_acc %f', valid_acc)\n\n utils.save(model, os.path.join(args.save, 'weights.pt'))\n\n\ndef train(train_queue, valid_queue, model, architect, criterion, optimizer, lr):\n objs = utils.AvgrageMeter()\n top1 = utils.AvgrageMeter()\n top5 = utils.AvgrageMeter()\n\n for step, (input, target) in enumerate(train_queue):\n model.train()\n n = input.size(0)\n\n input = input.cuda()\n target = target.cuda()\n\n # get a random minibatch from the search queue with replacement\n input_search, target_search = next(iter(valid_queue))\n input_search = input_search.cuda()\n target_search = target_search.cuda()\n\n architect.step(input, target, input_search, target_search, lr, optimizer, unrolled=args.unrolled)\n\n optimizer.zero_grad()\n logits = model(input)\n loss = criterion(logits, target)\n\n loss.backward()\n nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)\n optimizer.step()\n\n prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))\n objs.update(loss.item(), n)\n top1.update(prec1.item(), n)\n top5.update(prec5.item(), n)\n\n if step % args.report_freq == 0:\n logging.info('train %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)\n\n return top1.avg, objs.avg\n\n\ndef infer(valid_queue, model, criterion):\n objs = utils.AvgrageMeter()\n top1 = utils.AvgrageMeter()\n top5 = utils.AvgrageMeter()\n model.eval()\n\n for step, (input, target) in enumerate(valid_queue):\n input = input.cuda()\n target = target.cuda()\n\n logits = model(input)\n loss = criterion(logits, target)\n\n prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))\n n = input.size(0)\n objs.update(loss.item(), n)\n top1.update(prec1.item(), n)\n top5.update(prec5.item(), n)\n\n if step % 
args.report_freq == 0:\n logging.info('valid %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)\n\n return top1.avg, objs.avg\n\n\nif __name__ == '__main__':\n main() \n\n"
] |
[
[
"torch.nn.CrossEntropyLoss",
"torch.nn.functional.softmax",
"torch.cuda.manual_seed",
"numpy.random.seed",
"torch.manual_seed",
"torch.utils.data.sampler.SubsetRandomSampler",
"torch.no_grad",
"torch.cuda.is_available",
"torch.optim.SGD",
"numpy.floor",
"torch.nn.DataParallel"
]
] |
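The `apis` list above summarizes the calls found in the `train_search.py` code cell of this row. As a quick illustration of how two of those entries (`numpy.floor` and `torch.utils.data.sampler.SubsetRandomSampler`) combine into the 50/50 train/validation split that script builds, here is a minimal, self-contained sketch; the toy `TensorDataset`, batch size, and tensor shapes are placeholders chosen for the example and are not part of the dataset row.

```python
# Sketch of the split pattern used in the train_search.py cell above.
import numpy as np
import torch
from torch.utils.data import DataLoader, TensorDataset
from torch.utils.data.sampler import SubsetRandomSampler

train_portion = 0.5  # same default as the --train_portion flag above
# toy stand-in for the CIFAR10 training set: 100 samples, 3 features, 10 classes
data = TensorDataset(torch.randn(100, 3), torch.randint(0, 10, (100,)))

num_train = len(data)
indices = list(range(num_train))
split = int(np.floor(train_portion * num_train))

# first half of the indices -> weight updates, second half -> architecture updates
train_queue = DataLoader(data, batch_size=16,
                         sampler=SubsetRandomSampler(indices[:split]))
valid_queue = DataLoader(data, batch_size=16,
                         sampler=SubsetRandomSampler(indices[split:num_train]))

print(len(train_queue.sampler), len(valid_queue.sampler))  # 50 50
```

In the original script the two loaders feed the network-weight update and the architecture (`architect.step`) update respectively; the sketch only reproduces the index split itself.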
svetli-n/Castor
|
[
"c731061eac238f5efb630e45449af50940962992"
] |
[
"han/model.py"
] |
[
"import torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\n#from utils import \nimport torch.nn.functional as F\nfrom han.sent_level_rnn import SentLevelRNN\nfrom han.word_level_rnn import WordLevelRNN\n \n\nclass HAN(nn.Module):\n def __init__(self, config):\n super(HAN, self).__init__()\t\n self.dataset = config.dataset\n self.mode = config.mode\n self.word_attention_rnn = WordLevelRNN(config)\n self.sentence_attention_rnn = SentLevelRNN(config)\n def forward(self, x, **kwargs):\n x = x.permute(1,2,0) ## Expected : #sentences, #words, batch size\n num_sentences = x.size()[0]\n word_attentions = None\n for i in range(num_sentences):\n _word_attention = self.word_attention_rnn(x[i,:,:])\n if word_attentions is None:\n word_attentions = _word_attention\n else:\n word_attentions = torch.cat((word_attentions, _word_attention),0)\n return self.sentence_attention_rnn(word_attentions)\n\n"
] |
[
[
"torch.cat"
]
] |
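For the `han/model.py` row above, the single listed API (`torch.cat`) is the accumulate-and-concatenate step in `HAN.forward`, where per-sentence word-level outputs are stacked along dimension 0. Below is a minimal sketch of that pattern; the toy `word_attention` function is a stand-in for `WordLevelRNN` (an assumption for illustration only), and the shapes are arbitrary.

```python
# Sketch of the per-sentence accumulation loop from HAN.forward above.
import torch

def word_attention(sentence_batch):
    # placeholder for WordLevelRNN: returns a (1, batch, 1) summary per sentence
    return sentence_batch.float().mean(dim=0, keepdim=True).unsqueeze(-1)

x = torch.randint(0, 100, (4, 7, 2))  # (#sentences, #words, batch size)
word_attentions = None
for i in range(x.size(0)):
    out = word_attention(x[i, :, :])
    if word_attentions is None:
        word_attentions = out
    else:
        word_attentions = torch.cat((word_attentions, out), 0)

print(word_attentions.shape)  # torch.Size([4, 2, 1])
```

The real module then passes the stacked tensor to `SentLevelRNN`; the sketch stops at the concatenation, which is the call captured in the row's `apis` list.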
jepegit/cellpy
|
[
"b9ddb7afa3f7453bfb5f2f24a3268279bccf24c6"
] |
[
"cellpy/readers/cellreader.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"Datareader for cell testers and potentiostats.\n\nThis module is used for loading data and databases created by different cell\ntesters. Currently it only accepts arbin-type res-files (access) data as\nraw data files, but we intend to implement more types soon. It also creates\nprocessed files in the hdf5-format.\n\nExample:\n >>> d = CellpyData()\n >>> d.loadcell(names = [file1.res, file2.res]) # loads and merges the runs\n >>> voltage_curves = d.get_cap()\n >>> d.save(\"mytest.hdf\")\n\n\"\"\"\n\nimport os\nfrom pathlib import Path, PurePosixPath, PureWindowsPath\nimport logging\nimport sys\nimport collections\nimport warnings\nimport csv\nimport itertools\nimport time\nimport copy\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.errors import PerformanceWarning\nfrom scipy import interpolate\n\nfrom cellpy.parameters import prms\nfrom cellpy.parameters.legacy import internal_settings as old_settings\nfrom cellpy.exceptions import WrongFileVersion, DeprecatedFeature, NullData\nfrom cellpy.parameters.internal_settings import (\n get_headers_summary,\n get_cellpy_units,\n get_headers_normal,\n get_headers_step_table,\n ATTRS_CELLPYFILE,\n ATTRS_DATASET,\n ATTRS_DATASET_DEEP,\n ATTRS_CELLPYDATA,\n)\nfrom cellpy.readers.core import (\n FileID,\n Cell,\n CELLPY_FILE_VERSION,\n MINIMUM_CELLPY_FILE_VERSION,\n xldate_as_datetime,\n interpolate_y_on_x,\n identify_last_data_point,\n pickle_protocol,\n PICKLE_PROTOCOL,\n)\n\nHEADERS_NORMAL = get_headers_normal()\nHEADERS_SUMMARY = get_headers_summary()\nHEADERS_STEP_TABLE = get_headers_step_table()\n\n# TODO: @jepe - new feature - method for assigning new cycle numbers and step numbers\n# - Sometimes the user forgets to increment the cycle number and it would be good\n# to have a method so that its possible to set new cycle numbers manually\n# - Some testers merges different steps into one (e.g CC-CV), it would be nice to have\n# a method for \"splitting that up\"\n\n# TODO: @jepe - performance warnings - mixed types within cols (pytables)\nperformance_warning_level = \"ignore\" # \"ignore\", \"error\"\nwarnings.filterwarnings(\n performance_warning_level, category=pd.io.pytables.PerformanceWarning\n)\npd.set_option(\"mode.chained_assignment\", None) # \"raise\", \"warn\", None\n\nmodule_logger = logging.getLogger(__name__)\n\n\nclass CellpyData(object):\n \"\"\"Main class for working and storing data.\n\n This class is the main work-horse for cellpy where all the functions for\n reading, selecting, and tweaking your data is located. It also contains the\n header definitions, both for the cellpy hdf5 format, and for the various\n cell-tester file-formats that can be read. The class can contain\n several cell-tests and each test is stored in a list. 
If you see what I mean...\n\n Attributes:\n cells (list): list of DataSet objects.\n \"\"\"\n\n def __repr__(self):\n txt = f\"CellpyData-object (id={hex(id(self))})\"\n if self.name:\n txt += f\"\\nname: {self.name}\"\n if self.table_names:\n txt += f\"\\ntable_names: {self.table_names}\"\n if self.tester:\n txt += f\"\\ntester: {self.tester}\"\n\n number_of_cells = len(self.cells)\n txt += f\"\\ncells: {number_of_cells}\"\n return txt\n\n def _repr_html_(self):\n header = f\"\"\"\n <p>\n <h3>CellpyData-object</h3>\n <b>id</b>: {hex(id(self))} <br>\n <b>name</b>: {self.name} <br>\n <b>table names</b>: {self.table_names} <br>\n <b>tester</b>: {self.tester} <br>\n <b>cells</b>: {len(self.cells)} <br>\n <b>cycle_mode</b>: {self.cycle_mode} <br>\n <b>sep</b>: {self.sep} <br>\n <b>daniel_number</b>: {self.daniel_number} <br>\n <b>cellpy_datadir</b>: {self.cellpy_datadir} <br>\n <b>raw_datadir</b>: {self.raw_datadir} <br>\n </p>\n \"\"\"\n all_vars = \"<p>\"\n all_vars += f\"\"\"\n <b>capacity_modifiers</b>: {self.capacity_modifiers} <br>\n <b>empty</b>: {self.empty} <br>\n <b>ensure_step_table</b>: {self.ensure_step_table} <br>\n <b>filestatuschecker</b>: {self.filestatuschecker} <br>\n <b>force_step_table_creation</b>: {self.force_step_table_creation} <br>\n <b>forced_errors</b>: {self.forced_errors} <br>\n <b>limit_loaded_cycles</b>: {self.limit_loaded_cycles} <br>\n <b>load_only_summary</b>: {self.load_only_summary} <br>\n <b>profile</b>: {self.profile} <br>\n <b>raw_limits</b>: {self.raw_limits} <br>\n <b>raw_units</b>: {self.raw_units} <br>\n <b>select_minimal</b>: {self.select_minimal} <br>\n <b>selected_cell_number</b>: {self.selected_cell_number} <br>\n <b>selected_scans</b>: {self.selected_scans} <br>\n <b>status_datasets</b>: {self.status_datasets} <br>\n <b>summary_exists (deprecated)</b>: {self.summary_exists} <br>\n\n\n \"\"\"\n all_vars += \"</p>\"\n\n cell_txt = \"\"\n for i, cell in enumerate(self.cells):\n cell_txt += f\"<h4>cell {i + 1} of {len(self.cells)}</h4>\"\n cell_txt += cell._repr_html_()\n\n return header + all_vars + cell_txt\n\n def __str__(self):\n txt = \"<CellpyData>\\n\"\n if self.name:\n txt += f\"name: {self.name}\\n\"\n if self.table_names:\n txt += f\"table_names: {self.table_names}\\n\"\n if self.tester:\n txt += f\"tester: {self.tester}\\n\"\n if self.cells:\n txt += \"datasets: [ ->\\n\"\n for i, d in enumerate(self.cells):\n txt += f\" ({i})\\n\"\n for t in str(d).split(\"\\n\"):\n txt += \" \"\n txt += t\n txt += \"\\n\"\n txt += \"\\n\"\n txt += \"]\"\n else:\n txt += \"datasets: []\"\n txt += \"\\n\"\n return txt\n\n def __bool__(self):\n if self.cells:\n return True\n else:\n return False\n\n def __init__(\n self,\n filenames=None,\n selected_scans=None,\n profile=False,\n filestatuschecker=None, # \"modified\"\n fetch_one_liners=False,\n tester=None,\n initialize=False,\n ):\n \"\"\"CellpyData object\n\n Args:\n filenames: list of files to load.\n selected_scans:\n profile: experimental feature.\n filestatuschecker: property to compare cellpy and raw-files;\n default read from prms-file.\n fetch_one_liners: experimental feature.\n tester: instrument used (e.g. 
\"arbin\") (checks prms-file as\n default).\n initialize: create a dummy (empty) dataset; defaults to False.\n \"\"\"\n\n if tester is None:\n self.tester = prms.Instruments.tester\n else:\n self.tester = tester\n self.loader = None # this will be set in the function set_instrument\n self.logger = logging.getLogger(__name__)\n logging.debug(\"created CellpyData instance\")\n self.name = None\n self.profile = profile\n self.minimum_selection = {}\n if filestatuschecker is None:\n self.filestatuschecker = prms.Reader.filestatuschecker\n else:\n self.filestatuschecker = filestatuschecker\n self.forced_errors = 0\n self.summary_exists = False\n\n if not filenames:\n self.file_names = []\n else:\n self.file_names = filenames\n if not self._is_listtype(self.file_names):\n self.file_names = [self.file_names]\n if not selected_scans:\n self.selected_scans = []\n else:\n self.selected_scans = selected_scans\n if not self._is_listtype(self.selected_scans):\n self.selected_scans = [self.selected_scans]\n\n self.cells = []\n self.status_datasets = []\n self.selected_cell_number = 0\n self.number_of_datasets = 0\n\n self.capacity_modifiers = [\"reset\"]\n\n self.list_of_step_types = [\n \"charge\",\n \"discharge\",\n \"cv_charge\",\n \"cv_discharge\",\n \"taper_charge\",\n \"taper_discharge\",\n \"charge_cv\",\n \"discharge_cv\",\n \"ocvrlx_up\",\n \"ocvrlx_down\",\n \"ir\",\n \"rest\",\n \"not_known\",\n ]\n # - options\n self.force_step_table_creation = prms.Reader.force_step_table_creation\n self.force_all = prms.Reader.force_all\n self.sep = prms.Reader.sep\n self._cycle_mode = None\n # self.max_res_filesize = prms.Reader.max_res_filesize\n self.load_only_summary = prms.Reader.load_only_summary\n self.select_minimal = prms.Reader.select_minimal\n # self.chunk_size = prms.Reader.chunk_size # 100000\n # self.max_chunks = prms.Reader.max_chunks\n # self.last_chunk = prms.Reader.last_chunk\n self.limit_loaded_cycles = prms.Reader.limit_loaded_cycles\n self.limit_data_points = None\n # self.load_until_error = prms.Reader.load_until_error\n self.ensure_step_table = prms.Reader.ensure_step_table\n self.daniel_number = prms.Reader.daniel_number\n # self.raw_datadir = prms.Reader.raw_datadir\n self.raw_datadir = prms.Paths.rawdatadir\n # self.cellpy_datadir = prms.Reader.cellpy_datadir\n self.cellpy_datadir = prms.Paths.cellpydatadir\n # search in prm-file for res and hdf5 dirs in loadcell:\n self.auto_dirs = prms.Reader.auto_dirs\n\n # - headers and instruments\n self.headers_normal = get_headers_normal()\n self.headers_summary = get_headers_summary()\n self.headers_step_table = get_headers_step_table()\n\n self.table_names = None # dictionary defined in set_instruments\n self.set_instrument()\n\n # - units used by cellpy\n self.cellpy_units = get_cellpy_units()\n\n if initialize:\n self.initialize()\n\n def initialize(self):\n logging.debug(\"Initializing...\")\n self.cells.append(Cell())\n\n @property\n def cell(self):\n \"\"\"returns the DataSet instance\"\"\"\n # could insert a try-except thingy here...\n cell = self.cells[self.selected_cell_number]\n return cell\n\n @cell.setter\n def cell(self, new_cell):\n self.cells[self.selected_cell_number] = new_cell\n\n @property\n def dataset(self):\n \"\"\"returns the DataSet instance\"\"\"\n # could insert a try-except thingy here...\n warnings.warn(\n \"The .dataset property is deprecated, please use .cell instead.\",\n DeprecationWarning,\n )\n cell = self.cells[self.selected_cell_number]\n return cell\n\n @property\n def empty(self):\n \"\"\"gives False 
if the CellpyData object is empty (or un-functional)\"\"\"\n return not self.check()\n\n @classmethod\n def vacant(cls, cell=None):\n \"\"\"Create a CellpyData instance.\n Args:\n cell (CellpyData instance): the attributes from the cell will be copied\n to the new Cellpydata instance.\n\n Returns:\n CellpyData instance.\n \"\"\"\n\n new_cell = cls(initialize=True)\n if cell is not None:\n for attr in ATTRS_DATASET:\n value = getattr(cell.cell, attr)\n setattr(new_cell.cell, attr, value)\n\n for attr in ATTRS_DATASET_DEEP:\n value = getattr(cell.cell, attr)\n setattr(new_cell.cell, attr, copy.deepcopy(value))\n\n for attr in ATTRS_CELLPYDATA:\n value = getattr(cell, attr)\n setattr(new_cell, attr, value)\n\n return new_cell\n\n def split(self, cycle=None):\n \"\"\"Split experiment (CellpyData object) into two sub-experiments. if cycle\n is not give, it will split on the median cycle number\"\"\"\n\n if isinstance(cycle, int) or cycle is None:\n return self.split_many(base_cycles=cycle)\n\n def drop_from(self, cycle=None):\n \"\"\"Select first part of experiment (CellpyData object) up to cycle number\n 'cycle'\"\"\"\n if isinstance(cycle, int):\n c1, c2 = self.split_many(base_cycles=cycle)\n return c1\n\n def drop_to(self, cycle=None):\n \"\"\"Select last part of experiment (CellpyData object) from cycle number\n 'cycle'\"\"\"\n if isinstance(cycle, int):\n c1, c2 = self.split_many(base_cycles=cycle)\n return c2\n\n def drop_edges(self, start, end):\n \"\"\"Select middle part of experiment (CellpyData object) from cycle\n number 'start' to 'end\"\"\"\n\n if end < start:\n raise ValueError(\"end cannot be larger than start\")\n if end == start:\n raise ValueError(\"end cannot be the same as start\")\n return self.split_many([start, end])[1]\n\n def split_many(self, base_cycles=None):\n \"\"\"Split experiment (CellpyData object) into several sub-experiments.\n\n Args:\n base_cycles (int or list of ints): cycle(s) to do the split on.\n\n Returns:\n List of CellpyData objects\n \"\"\"\n h_summary_index = HEADERS_SUMMARY.cycle_index\n h_raw_index = HEADERS_NORMAL.cycle_index_txt\n h_step_cycle = HEADERS_STEP_TABLE.cycle\n\n if base_cycles is None:\n all_cycles = self.get_cycle_numbers()\n base_cycles = int(np.median(all_cycles))\n\n cells = list()\n if not isinstance(base_cycles, (list, tuple)):\n base_cycles = [base_cycles]\n\n dataset = self.cell\n steptable = dataset.steps\n data = dataset.raw\n summary = dataset.summary\n\n # In case Cycle_Index has been promoted to index [#index]\n if h_summary_index not in summary.columns:\n summary = summary.reset_index(drop=False)\n\n for b_cycle in base_cycles:\n steptable0, steptable = [\n steptable[steptable[h_step_cycle] < b_cycle],\n steptable[steptable[h_step_cycle] >= b_cycle],\n ]\n data0, data = [\n data[data[h_raw_index] < b_cycle],\n data[data[h_raw_index] >= b_cycle],\n ]\n summary0, summary = [\n summary[summary[h_summary_index] < b_cycle],\n summary[summary[h_summary_index] >= b_cycle],\n ]\n\n new_cell = CellpyData.vacant(cell=self)\n old_cell = CellpyData.vacant(cell=self)\n\n new_cell.cell.steps = steptable0\n new_cell.cell.raw = data0\n new_cell.cell.summary = summary0\n new_cell.cell = identify_last_data_point(new_cell.cell)\n\n old_cell.cell.steps = steptable\n old_cell.cell.raw = data\n old_cell.cell.summary = summary\n old_cell.cell = identify_last_data_point(old_cell.cell)\n\n cells.append(new_cell)\n\n cells.append(old_cell)\n return cells\n\n # TODO: @jepe - merge the _set_xxinstrument methods into one method\n def 
set_instrument(self, instrument=None, **kwargs):\n \"\"\"Set the instrument (i.e. tell cellpy the file-type you use).\n\n Args:\n instrument: (str) in [\"arbin\", \"bio-logic-csv\", \"bio-logic-bin\",...]\n kwargs (dict): key-word arguments sent to the initializer of the\n loader class\n\n Sets the instrument used for obtaining the data (i.e. sets file-format)\n\n \"\"\"\n\n custom_instrument_splitter = \"::\"\n\n if instrument is None:\n instrument = self.tester\n\n logging.debug(f\"Setting instrument: {instrument}\")\n\n if instrument in [\"arbin\", \"arbin_res\"]:\n from cellpy.readers.instruments.arbin_res import ArbinLoader as RawLoader\n\n self._set_instrument(RawLoader)\n self.tester = \"arbin\"\n\n elif instrument == \"arbin_sql\":\n from cellpy.readers.instruments.arbin_sql import ArbinSQLLoader as RawLoader\n\n logging.warning(f\"{instrument} is experimental! Not ready for production!\")\n self._set_instrument(RawLoader)\n self.tester = \"arbin_sql\"\n\n elif instrument == \"arbin_sql_csv\":\n from cellpy.readers.instruments.arbin_sql_csv import (\n ArbinCsvLoader as RawLoader,\n )\n\n logging.warning(f\"{instrument} is experimental! Not ready for production!\")\n self._set_instrument(RawLoader, **kwargs)\n self.tester = \"arbin_sql_csv\"\n\n elif instrument in [\"pec\", \"pec_csv\"]:\n logging.warning(\"Experimental! Not ready for production!\")\n from cellpy.readers.instruments.pec import PECLoader as RawLoader\n\n self._set_instrument(RawLoader)\n self.tester = \"pec\"\n\n elif instrument in [\"biologics\", \"biologics_mpr\"]:\n from cellpy.readers.instruments.biologics_mpr import MprLoader as RawLoader\n\n logging.warning(\"Experimental! Not ready for production!\")\n self._set_instrument(RawLoader)\n self.tester = \"biologic\"\n\n elif instrument in [\"maccor\", \"maccor_txt\"]:\n from cellpy.readers.instruments.maccor_txt import MaccorTxtLoader as RawLoader\n logging.warning(\"Experimental! 
Not ready for production!\")\n self._set_instrument(RawLoader, **kwargs)\n self.tester = \"maccor\"\n\n elif instrument.startswith(\"custom\"):\n logging.debug(f\"using custom instrument: {instrument}\")\n _instrument = instrument.split(custom_instrument_splitter)\n try:\n custom_instrument_definition_file = _instrument[1]\n prms.Instruments.custom_instrument_definitions_file = (\n custom_instrument_definition_file\n )\n except IndexError:\n logging.debug(\"no definition file provided\")\n\n from cellpy.readers.instruments.custom import CustomLoader as RawLoader\n\n self._set_instrument(RawLoader)\n self.tester = \"custom\"\n\n else:\n raise Exception(f\"option does not exist: '{instrument}'\")\n\n def _set_instrument(self, loader_class, **kwargs):\n self.loader_class = loader_class(**kwargs)\n # ----- get information --------------------------\n self.raw_units = self.loader_class.get_raw_units()\n self.raw_limits = self.loader_class.get_raw_limits()\n # ----- create the loader ------------------------\n self.loader = self.loader_class.loader\n\n def _create_logger(self):\n from cellpy import log\n\n self.logger = logging.getLogger(__name__)\n log.setup_logging(default_level=\"DEBUG\")\n\n @property\n def cycle_mode(self):\n try:\n cell = self.cell\n return cell.cycle_mode\n except IndexError:\n return self._cycle_mode\n\n @cycle_mode.setter\n def cycle_mode(self, cycle_mode):\n logging.debug(f\"-> cycle_mode: {cycle_mode}\")\n try:\n cell = self.cell\n cell.cycle_mode = cycle_mode\n self._cycle_mode = cycle_mode\n except IndexError:\n self._cycle_mode = cycle_mode\n\n def set_raw_datadir(self, directory=None):\n \"\"\"Set the directory containing .res-files.\n\n Used for setting directory for looking for res-files.@\n A valid directory name is required.\n\n Args:\n directory (str): path to res-directory\n\n Example:\n >>> d = CellpyData()\n >>> directory = \"MyData/Arbindata\"\n >>> d.set_raw_datadir(directory)\n\n \"\"\"\n\n if directory is None:\n logging.info(\"No directory name given\")\n return\n if not os.path.isdir(directory):\n logging.info(directory)\n logging.info(\"Directory does not exist\")\n return\n self.raw_datadir = directory\n\n def set_cellpy_datadir(self, directory=None):\n \"\"\"Set the directory containing .hdf5-files.\n\n Used for setting directory for looking for hdf5-files.\n A valid directory name is required.\n\n Args:\n directory (str): path to hdf5-directory\n\n Example:\n >>> d = CellpyData()\n >>> directory = \"MyData/HDF5\"\n >>> d.set_raw_datadir(directory)\n\n \"\"\"\n\n if directory is None:\n logging.info(\"No directory name given\")\n return\n if not os.path.isdir(directory):\n logging.info(\"Directory does not exist\")\n return\n self.cellpy_datadir = directory\n\n def check_file_ids(self, rawfiles, cellpyfile, detailed=False):\n \"\"\"Check the stats for the files (raw-data and cellpy hdf5).\n\n This function checks if the hdf5 file and the res-files have the same\n timestamps etc to find out if we need to bother to load .res -files.\n\n Args:\n cellpyfile (str): filename of the cellpy hdf5-file.\n rawfiles (list of str): name(s) of raw-data file(s).\n detailed (bool): return a dict containing True or False for each\n individual raw-file\n\n Returns:\n If detailed is False:\n False if the raw files are newer than the cellpy hdf5-file\n (update needed).\n True if update is not needed.\n If detailed is True it returns a dict containing True or False for each\n individual raw-file.\n \"\"\"\n\n txt = f\"Checking file ids - using 
'{self.filestatuschecker}'\"\n logging.info(txt)\n\n ids_cellpy_file = self._check_cellpy_file(cellpyfile)\n\n logging.debug(f\"cellpyfile ids: {ids_cellpy_file}\")\n\n if not ids_cellpy_file:\n # logging.debug(\"hdf5 file does not exist - needs updating\")\n return False\n\n ids_raw = self._check_raw(rawfiles)\n\n if detailed:\n similar = self._parse_ids(ids_raw, ids_cellpy_file)\n return similar\n\n else:\n similar = self._compare_ids(ids_raw, ids_cellpy_file)\n if not similar:\n # logging.debug(\"hdf5 file needs updating\")\n return False\n else:\n # logging.debug(\"hdf5 file is updated\")\n return True\n\n def _check_raw(self, file_names, abort_on_missing=False):\n \"\"\"Get the file-ids for the res_files.\"\"\"\n\n strip_file_names = True\n check_on = self.filestatuschecker\n if not self._is_listtype(file_names):\n file_names = [file_names]\n\n ids = dict()\n for f in file_names:\n logging.debug(f\"checking raw file {f}\")\n fid = FileID(f)\n # logging.debug(fid)\n if fid.name is None:\n warnings.warn(f\"file does not exist: {f}\")\n if abort_on_missing:\n sys.exit(-1)\n else:\n if strip_file_names:\n name = os.path.basename(f)\n else:\n name = f\n if check_on == \"size\":\n ids[name] = int(fid.size)\n elif check_on == \"modified\":\n ids[name] = int(fid.last_modified)\n else:\n ids[name] = int(fid.last_accessed)\n return ids\n\n def _check_cellpy_file(self, filename):\n \"\"\"Get the file-ids for the cellpy_file.\"\"\"\n\n use_full_filename_path = False\n parent_level = prms._cellpyfile_root\n fid_dir = prms._cellpyfile_fid\n check_on = self.filestatuschecker\n logging.debug(\"checking cellpy-file\")\n logging.debug(filename)\n if not os.path.isfile(filename):\n logging.debug(\"cellpy-file does not exist\")\n return None\n try:\n store = pd.HDFStore(filename)\n except Exception as e:\n logging.debug(f\"could not open cellpy-file ({e})\")\n return None\n fidtable = None\n try:\n fidtable = store.select(parent_level + fid_dir)\n except KeyError:\n logging.warning(\"no fidtable - you should update your hdf5-file\")\n except NotImplementedError:\n logging.warning(\n \"your system cannot read the fid-table (posix-windows confusion) \"\n \"hopefully this will be solved in a newer version of pytables.\"\n )\n finally:\n store.close()\n if fidtable is not None:\n raw_data_files, raw_data_files_length = self._convert2fid_list(fidtable)\n txt = \"contains %i res-files\" % (len(raw_data_files))\n logging.debug(txt)\n ids = dict()\n for fid in raw_data_files:\n full_name = fid.full_name\n name = fid.name\n size = fid.size\n mod = fid.last_modified\n logging.debug(f\"fileID information for: {full_name}\")\n logging.debug(f\" modified: {mod}\")\n logging.debug(f\" size: {size}\")\n\n if use_full_filename_path:\n name = full_name\n\n if check_on == \"size\":\n ids[name] = int(fid.size)\n elif check_on == \"modified\":\n ids[name] = int(fid.last_modified)\n else:\n ids[name] = int(fid.last_accessed)\n return ids\n else:\n return None\n\n @staticmethod\n def _compare_ids(ids_raw, ids_cellpy_file):\n similar = True\n l_res = len(ids_raw)\n l_cellpy = len(ids_cellpy_file)\n if l_res == l_cellpy and l_cellpy > 0:\n for name, value in list(ids_raw.items()):\n try:\n c_value = ids_cellpy_file[name]\n except KeyError:\n logging.debug(\"KeyError when comparing raw and cellpy file.\")\n logging.debug(\n \"Could be due to upper case vs. 
lower case confusion.\"\n )\n similar = False\n else:\n if c_value != value:\n similar = False\n else:\n similar = False\n\n return similar\n\n @staticmethod\n def _parse_ids(ids_raw, ids_cellpy_file):\n similar = dict()\n for name in ids_raw:\n v_cellpy = ids_cellpy_file.get(name, None)\n v_raw = ids_raw[name]\n similar[name] = False\n if v_raw is not None:\n if v_raw == v_cellpy:\n similar[name] = True\n return similar\n\n def loadcell(\n self,\n raw_files,\n cellpy_file=None,\n mass=None,\n summary_on_raw=False,\n summary_ir=True,\n summary_ocv=False,\n summary_end_v=True,\n only_summary=False,\n force_raw=False,\n use_cellpy_stat_file=None,\n cell_type=None,\n selector=None,\n **kwargs,\n ):\n\n \"\"\"Loads data for given cells.\n\n Args:\n raw_files (list): name of res-files\n cellpy_file (path): name of cellpy-file\n mass (float): mass of electrode or active material\n summary_on_raw (bool): use raw-file for summary\n summary_ir (bool): summarize ir\n summary_ocv (bool): summarize ocv steps\n summary_end_v (bool): summarize end voltage\n only_summary (bool): get only the summary of the runs\n force_raw (bool): only use raw-files\n use_cellpy_stat_file (bool): use stat file if creating summary\n from raw\n cell_type (str): set the cell type (e.g. \"anode\"). If not, the default from\n the config file is used.\n selector (dict): passed to load.\n **kwargs: passed to from_raw\n\n Example:\n\n >>> srnos = my_dbreader.select_batch(\"testing_new_solvent\")\n >>> cell_datas = []\n >>> for srno in srnos:\n >>> ... my_run_name = my_dbreader.get_cell_name(srno)\n >>> ... mass = my_dbreader.get_mass(srno)\n >>> ... rawfiles, cellpyfiles = \\\n >>> ... filefinder.search_for_files(my_run_name)\n >>> ... cell_data = cellreader.CellpyData()\n >>> ... cell_data.loadcell(raw_files=rawfiles,\n >>> ... cellpy_file=cellpyfiles)\n >>> ... cell_data.set_mass(mass)\n >>> ... if not cell_data.summary_exists:\n >>> ... cell_data.make_summary() # etc. etc.\n >>> ... cell_datas.append(cell_data)\n >>>\n \"\"\"\n\n # This is a part of a dramatic API change. It will not be possible to\n # load more than one set of datasets (i.e. 
one single cellpy-file or\n # several raw-files that will be automatically merged)\n\n # TODO @jepe Make setting or prm so that it is possible to update only new data\n # TODO @jepe Allow passing handle to progress-bar or update a global progressbar\n\n logging.info(\"Started cellpy.cellreader.loadcell\")\n if cellpy_file is None:\n similar = False\n elif force_raw:\n similar = False\n else:\n similar = self.check_file_ids(raw_files, cellpy_file)\n logging.debug(\"checked if the files were similar\")\n\n if only_summary:\n self.load_only_summary = True\n else:\n self.load_only_summary = False\n\n if not similar:\n logging.debug(\"cellpy file(s) needs updating - loading raw\")\n logging.info(\"Loading raw-file\")\n logging.debug(raw_files)\n self.from_raw(raw_files, **kwargs)\n if cell_type is not None:\n self.cycle_mode = cell_type\n logging.debug(f\"setting cycle mode: {cell_type}\")\n logging.debug(\"loaded files\")\n # Check if the run was loaded ([] if empty)\n if self.status_datasets:\n if mass:\n self.set_mass(mass)\n if summary_on_raw:\n nom_cap = kwargs.pop(\"nom_cap\", None)\n if nom_cap is not None:\n self.set_nom_cap(nom_cap)\n self.make_summary(\n all_tests=False,\n find_ocv=summary_ocv,\n find_ir=summary_ir,\n find_end_voltage=summary_end_v,\n use_cellpy_stat_file=use_cellpy_stat_file,\n # nom_cap=nom_cap,\n )\n else:\n logging.warning(\"Empty run!\")\n\n else:\n self.load(cellpy_file, selector=selector)\n nom_cap = kwargs.pop(\"nom_cap\", None)\n if nom_cap is not None:\n self.set_nom_cap(nom_cap)\n if mass:\n self.set_mass(mass)\n\n return self\n\n def dev_update_loadcell(\n self,\n raw_files,\n cellpy_file=None,\n mass=None,\n summary_on_raw=False,\n summary_ir=True,\n summary_ocv=False,\n summary_end_v=True,\n force_raw=False,\n use_cellpy_stat_file=None,\n nom_cap=None,\n selector=None,\n ):\n\n logging.info(\"Started cellpy.cellreader.loadcell\")\n\n if cellpy_file is None or force_raw:\n similar = None\n else:\n similar = self.check_file_ids(raw_files, cellpy_file, detailed=True)\n\n logging.debug(\"checked if the files were similar\")\n\n if similar is None:\n # forcing to load only raw_files\n self.from_raw(raw_files)\n if self.status_datasets:\n if mass:\n self.set_mass(mass)\n if summary_on_raw:\n self.make_summary(\n all_tests=False,\n find_ocv=summary_ocv,\n find_ir=summary_ir,\n find_end_voltage=summary_end_v,\n use_cellpy_stat_file=use_cellpy_stat_file,\n nom_cap=nom_cap,\n )\n else:\n logging.warning(\"Empty run!\")\n return self\n\n self.load(cellpy_file, selector=selector)\n if mass:\n self.set_mass(mass)\n\n if all(similar.values()):\n logging.info(\"Everything is up to date\")\n return\n\n start_file = True\n for i, f in enumerate(raw_files):\n f = Path(f)\n if not similar[f.name] and start_file:\n try:\n last_data_point = self.cell.raw_data_files[i].last_data_point\n except IndexError:\n last_data_point = 0\n\n self.dev_update_from_raw(\n file_names=f, data_points=[last_data_point, None]\n )\n self.cell = self.dev_update_merge()\n\n elif not similar[f.name]:\n try:\n last_data_point = self.cell.raw_data_files[i].last_data_point\n except IndexError:\n last_data_point = 0\n\n self.dev_update_from_raw(\n file_names=f, data_points=[last_data_point, None]\n )\n self.merge()\n\n start_file = False\n\n self.dev_update_make_steps()\n self.dev_update_make_summary(\n all_tests=False,\n find_ocv=summary_ocv,\n find_ir=summary_ir,\n find_end_voltage=summary_end_v,\n use_cellpy_stat_file=use_cellpy_stat_file,\n )\n return self\n\n def dev_update(self, file_names=None, 
**kwargs):\n print(\"NOT FINISHED YET - but close\")\n if len(self.cell.raw_data_files) != 1:\n logging.warning(\"Merged cell. But can only update based on the last file\")\n print(self.cell.raw_data_files)\n for fid in self.cell.raw_data_files:\n print(fid)\n last = self.cell.raw_data_files[0].last_data_point\n\n self.dev_update_from_raw(\n file_names=file_names, data_points=[last, None], **kwargs\n )\n print(\"lets try to merge\")\n self.cell = self.dev_update_merge()\n print(\"now it is time to update the step table\")\n self.dev_update_make_steps()\n print(\"and finally, lets update the summary\")\n self.dev_update_make_summary()\n\n def dev_update_merge(self):\n print(\"NOT FINISHED YET - but very close\")\n number_of_tests = len(self.cells)\n if number_of_tests != 2:\n logging.warning(\"Cannot merge if you do not have exactly two cell-objects\")\n return\n t1, t2 = self.cells\n\n if t1.raw.empty:\n logging.debug(\"OBS! the first dataset is empty\")\n\n if t2.raw.empty:\n t1.merged = True\n logging.debug(\"the second dataset was empty\")\n logging.debug(\" -> merged contains only first\")\n return t1\n test = t1\n\n cycle_index_header = self.headers_normal.cycle_index_txt\n\n if not t1.raw.empty:\n t1.raw = t1.raw.iloc[:-1]\n raw2 = pd.concat([t1.raw, t2.raw], ignore_index=True)\n test.no_cycles = max(raw2[cycle_index_header])\n test.raw = raw2\n else:\n test.no_cycles = max(t2.raw[cycle_index_header])\n test = t2\n logging.debug(\" -> merged with new dataset\")\n\n return test\n\n def dev_update_make_steps(self, **kwargs):\n old_steps = self.cell.steps.iloc[:-1]\n # Note! hard-coding header name (might fail if changing default headers)\n from_data_point = self.cell.steps.iloc[-1].point_first\n new_steps = self.make_step_table(from_data_point=from_data_point, **kwargs)\n merged_steps = pd.concat([old_steps, new_steps]).reset_index(drop=True)\n self.cell.steps = merged_steps\n\n def dev_update_make_summary(self, **kwargs):\n print(\"NOT FINISHED YET - but not critical\")\n # Update not implemented yet, running full summary calculations for now.\n # For later:\n # old_summary = self.cell.summary.iloc[:-1]\n cycle_index_header = self.headers_summary.cycle_index\n from_cycle = self.cell.summary.iloc[-1][cycle_index_header]\n self.make_summary(from_cycle=from_cycle, **kwargs)\n # For later:\n # (Remark! need to solve how to merge culumated columns)\n # new_summary = self.make_summary(from_cycle=from_cycle)\n # merged_summary = pd.concat([old_summary, new_summary]).reset_index(drop=True)\n # self.cell.summary = merged_summary\n\n def dev_update_from_raw(self, file_names=None, data_points=None, **kwargs):\n \"\"\"This method is under development. Using this to develop updating files\n with only new data.\n \"\"\"\n print(\"NOT FINISHED YET - but very close\")\n if file_names:\n self.file_names = file_names\n\n if file_names is None:\n logging.info(\n \"No filename given and no stored in the file_names \"\n \"attribute. Returning None\"\n )\n return None\n\n if not isinstance(self.file_names, (list, tuple)):\n self.file_names = [file_names]\n\n raw_file_loader = self.loader\n\n set_number = 0\n test = None\n\n logging.debug(\"start iterating through file(s)\")\n print(self.file_names)\n\n for f in self.file_names:\n logging.debug(\"loading raw file:\")\n logging.debug(f\"{f}\")\n\n # get a list of cellpy.readers.core.Cell objects\n test = raw_file_loader(f, data_points=data_points, **kwargs)\n # remark that the bounds are included (i.e. 
the first datapoint\n # is 5000.\n\n logging.debug(\"added the data set - merging file info\")\n\n # raw_data_file = copy.deepcopy(test[set_number].raw_data_files[0])\n # file_size = test[set_number].raw_data_files_length[0]\n\n # test[set_number].raw_data_files.append(raw_data_file)\n # test[set_number].raw_data_files_length.append(file_size)\n # return test\n\n self.cells.append(test[set_number])\n\n self.number_of_datasets = len(self.cells)\n self.status_datasets = self._validate_datasets()\n self._invent_a_name()\n return self\n\n def from_raw(self, file_names=None, **kwargs):\n \"\"\"Load a raw data-file.\n\n Args:\n file_names (list of raw-file names): uses CellpyData.file_names if\n None. If the list contains more than one file name, then the\n runs will be merged together.\n\n Other keywords depending on loader:\n [ArbinLoader]:\n bad_steps (list of tuples): (c, s) tuples of steps s (in cycle c)\n to skip loading.\n dataset_number (int): the data set number to select if you are dealing\n with arbin files with more than one data-set.\n data_points (tuple of ints): load only data from data_point[0] to\n data_point[1] (use None for infinite). NOT IMPLEMEMTED YET.\n\n \"\"\"\n # This function only loads one test at a time (but could contain several\n # files). The function from_res() used to implement loading several\n # datasets (using list of lists as input), however it is now deprecated.\n\n if file_names:\n self.file_names = file_names\n\n if not isinstance(self.file_names, (list, tuple)):\n self.file_names = [file_names]\n\n # file_type = self.tester\n instrument = kwargs.pop(\"instrument\", None)\n if instrument:\n logging.info(\"Setting custom instrument\")\n logging.info(f\"-> {instrument}\")\n self.set_instrument(instrument)\n raw_file_loader = self.loader\n # test is currently a list of tests - this option will be removed in the future\n # so set_number is hard-coded to 0, i.e. actual-test is always test[0]\n set_number = 0\n test = None\n counter = 0\n logging.debug(\"start iterating through file(s)\")\n\n for f in self.file_names:\n logging.debug(\"loading raw file:\")\n logging.debug(f\"{f}\")\n new_tests = raw_file_loader(f, **kwargs)\n\n if new_tests:\n\n # retrieving the first cell data (e.g. first file)\n if test is None:\n logging.debug(\"getting data from first file\")\n if new_tests[set_number].no_data:\n logging.debug(\"NO DATA\")\n else:\n test = new_tests\n\n # appending cell data file to existing\n else:\n logging.debug(\"continuing reading files...\")\n _test = self._append(test[set_number], new_tests[set_number])\n\n if not _test:\n logging.warning(f\"EMPTY TEST: {f}\")\n continue\n\n test[set_number] = _test\n\n # retrieving file info in a for-loop in case of multiple files\n # Remark!\n # - the raw_data_files attribute is a list\n # - the raw_data_files_length attribute is a list\n # The reason for this choice is not clear anymore, but\n # let us keep it like this for now\n logging.debug(\"added the data set - merging file info\")\n # TODO: include this into prms (and config-file):\n max_raw_files_to_merge = 20\n for j in range(len(new_tests[set_number].raw_data_files)):\n raw_data_file = new_tests[set_number].raw_data_files[j]\n file_size = new_tests[set_number].raw_data_files_length[j]\n test[set_number].raw_data_files.append(raw_data_file)\n test[set_number].raw_data_files_length.append(file_size)\n counter += 1\n if counter > max_raw_files_to_merge:\n logging.debug(\"ERROR? 
Too many files to merge\")\n raise ValueError(\n \"Too many files to merge - \"\n \"could be a p2-p3 zip thing\"\n )\n\n else:\n logging.debug(\"NOTHING LOADED\")\n\n logging.debug(\"finished loading the raw-files\")\n\n test_exists = False\n if test:\n if test[0].no_data:\n logging.debug(\n \"the first dataset (or only dataset) loaded from the raw data file is empty\"\n )\n else:\n test_exists = True\n\n if test_exists:\n if not prms.Reader.sorted_data:\n logging.debug(\"sorting data\")\n test[set_number] = self._sort_data(test[set_number])\n\n self.cells.append(test[set_number])\n else:\n logging.warning(\"No new datasets added!\")\n self.number_of_datasets = len(self.cells)\n self.status_datasets = self._validate_datasets()\n self._invent_a_name()\n return self\n\n def from_res(self, filenames=None, check_file_type=True):\n \"\"\"Convenience function for loading arbin-type data into the\n datastructure.\n\n Args:\n filenames: ((lists of) list of raw-file names): uses\n cellpy.file_names if None.\n If list-of-list, it loads each list into separate datasets.\n The files in the inner list will be merged.\n check_file_type (bool): check file type if True\n (res-, or cellpy-format)\n \"\"\"\n raise DeprecatedFeature\n\n def _validate_datasets(self, level=0):\n logging.debug(\"validating test\")\n level = 0\n # simple validation for finding empty datasets - should be expanded to\n # find not-complete datasets, datasets with missing prms etc\n v = []\n if level == 0:\n for test in self.cells:\n # check that it contains all the necessary headers\n # (and add missing ones)\n # test = self._clean_up_normal_table(test)\n # check that the test is not empty\n v.append(self._is_not_empty_dataset(test))\n logging.debug(f\"validation array: {v}\")\n return v\n\n def check(self):\n \"\"\"Returns False if no datasets exists or if one or more of the datasets\n are empty\"\"\"\n\n if len(self.status_datasets) == 0:\n return False\n if all(self.status_datasets):\n return True\n return False\n\n # TODO: maybe consider being a bit more concice (re-implement)\n def _is_not_empty_dataset(self, dataset):\n if dataset is self._empty_dataset():\n return False\n else:\n return True\n\n # TODO: check if this is useful and if it is rename, if not delete\n def _clean_up_normal_table(self, test=None, dataset_number=None):\n # check that test contains all the necessary headers\n # (and add missing ones)\n raise NotImplementedError\n\n # TODO: this is used for the check-datasetnr-thing. Will soon be obsolete?\n def _report_empty_dataset(self):\n logging.info(\"Empty set\")\n\n @staticmethod\n def _empty_dataset():\n return None\n\n def _invent_a_name(self, filename=None, override=False):\n if filename is None:\n self.name = \"nameless\"\n return\n if self.name and not override:\n return\n path = Path(filename)\n self.name = path.with_suffix(\"\").name\n\n def partial_load(self, **kwargs):\n \"\"\"Load only a selected part of the cellpy file.\"\"\"\n raise NotImplementedError\n\n def link(self, **kwargs):\n \"\"\"Create a link to a cellpy file.\n\n If the file is very big, it is sometimes better to work with the data\n out of memory (i.e. on disk). A CellpyData object with a linked file\n will in most cases work as a normal object. However, some of the methods\n might be disabled. 
And it will be slower.\n\n Notes:\n 2020.02.08 - maybe this functionality is not needed and can be replaced\n by using dask or similar?\n \"\"\"\n raise NotImplementedError\n\n def load(\n self,\n cellpy_file,\n parent_level=None,\n return_cls=True,\n accept_old=True,\n selector=None,\n ):\n \"\"\"Loads a cellpy file.\n\n Args:\n cellpy_file (path, str): Full path to the cellpy file.\n parent_level (str, optional): Parent level. Warning! Deprecating this soon!\n return_cls (bool): Return the class.\n accept_old (bool): Accept loading old cellpy-file versions.\n Instead of raising WrongFileVersion it only issues a warning.\n selector (): under development\n\n Returns:\n cellpy.CellPyData class if return_cls is True\n \"\"\"\n\n try:\n logging.debug(\"loading cellpy-file (hdf5):\")\n logging.debug(cellpy_file)\n\n with pickle_protocol(PICKLE_PROTOCOL):\n new_datasets = self._load_hdf5(\n cellpy_file, parent_level, accept_old, selector=selector\n )\n logging.debug(\"cellpy-file loaded\")\n\n except AttributeError:\n new_datasets = []\n logging.warning(\n \"This cellpy-file version is not supported by\"\n \"current reader (try to update cellpy).\"\n )\n\n if new_datasets:\n for dataset in new_datasets:\n self.cells.append(dataset)\n else:\n # raise LoadError\n logging.warning(\"Could not load\")\n logging.warning(str(cellpy_file))\n\n self.number_of_datasets = len(self.cells)\n self.status_datasets = self._validate_datasets()\n self._invent_a_name(cellpy_file)\n if return_cls:\n return self\n\n def old_load(\n self, cellpy_file, parent_level=None, return_cls=True, accept_old=False\n ):\n \"\"\"Loads a cellpy file.\n\n Args:\n cellpy_file (path, str): Full path to the cellpy file.\n parent_level (str, optional): Parent level. Warning! Deprecating this soon!\n return_cls (bool): Return the class.\n accept_old (bool): Accept loading old cellpy-file versions.\n Instead of raising WrongFileVersion it only issues a warning.\n\n Returns:\n cellpy.CellPyData class if return_cls is True\n \"\"\"\n\n try:\n logging.debug(\"loading cellpy-file (hdf5):\")\n logging.debug(cellpy_file)\n with pickle_protocol(PICKLE_PROTOCOL):\n new_datasets = self._load_hdf5(cellpy_file, parent_level, accept_old)\n logging.debug(\"cellpy-file loaded\")\n except AttributeError:\n new_datasets = []\n logging.warning(\n \"This cellpy-file version is not supported by\"\n \"current reader (try to update cellpy).\"\n )\n\n if new_datasets:\n for dataset in new_datasets:\n self.cells.append(dataset)\n else:\n # raise LoadError\n logging.warning(\"Could not load\")\n logging.warning(str(cellpy_file))\n\n self.number_of_datasets = len(self.cells)\n self.status_datasets = self._validate_datasets()\n self._invent_a_name(cellpy_file)\n if return_cls:\n return self\n\n def _get_cellpy_file_version(self, filename, meta_dir=\"/info\", parent_level=None):\n if parent_level is None:\n parent_level = prms._cellpyfile_root\n\n with pd.HDFStore(filename) as store:\n try:\n meta_table = store.select(parent_level + meta_dir)\n except KeyError:\n raise WrongFileVersion(\n \"This file is VERY old - cannot read file version number\"\n )\n try:\n cellpy_file_version = self._extract_from_dict(\n meta_table, \"cellpy_file_version\"\n )\n except Exception as e:\n warnings.warn(f\"Unhandled exception raised: {e}\")\n return 0\n\n return cellpy_file_version\n\n def _load_hdf5(self, filename, parent_level=None, accept_old=False, selector=None):\n \"\"\"Load a cellpy-file.\n\n Args:\n filename (str): Name of the cellpy file.\n parent_level (str) 
(optional): name of the parent level\n (defaults to \"CellpyData\"). DeprecationWarning!\n accept_old (bool): accept old file versions.\n selector (): select specific ranges (under development)\n\n Returns:\n loaded datasets (DataSet-object)\n \"\"\"\n\n if parent_level is None:\n parent_level = prms._cellpyfile_root\n\n if parent_level != prms._cellpyfile_root:\n logging.debug(\n f\"Using non-default parent label for the \" f\"hdf-store: {parent_level}\"\n )\n\n if not os.path.isfile(filename):\n logging.info(f\"File does not exist: {filename}\")\n raise IOError(f\"File does not exist: {filename}\")\n\n cellpy_file_version = self._get_cellpy_file_version(filename)\n logging.debug(f\"Cellpy file version {cellpy_file_version}; selector={selector}\")\n if cellpy_file_version > CELLPY_FILE_VERSION:\n raise WrongFileVersion(\n f\"File format too new: {filename} :: version: {cellpy_file_version}\"\n f\"Reload from raw or upgrade your cellpy!\"\n )\n\n elif cellpy_file_version < MINIMUM_CELLPY_FILE_VERSION:\n raise WrongFileVersion(\n f\"File format too old: {filename} :: version: {cellpy_file_version}\"\n f\"Reload from raw or downgrade your cellpy!\"\n )\n\n elif cellpy_file_version < CELLPY_FILE_VERSION:\n if accept_old:\n logging.debug(f\"old cellpy file version {cellpy_file_version}\")\n logging.debug(f\"filename: {filename}\")\n logging.warning(\n f\"Loading old file-type. It is recommended that you remake the step table and the \"\n f\"summary table.\"\n )\n new_data = self._load_old_hdf5(filename, cellpy_file_version)\n else:\n raise WrongFileVersion(\n f\"File format too old: {filename} :: version: {cellpy_file_version}\"\n f\"Try loading setting accept_old=True\"\n )\n\n else:\n logging.debug(f\"Loading {filename} :: v{cellpy_file_version}\")\n new_data = self._load_hdf5_current_version(filename, selector=selector)\n\n # self.__check_loaded_data(new_data)\n\n return new_data\n\n def _load_hdf5_current_version(\n self, filename, meta_dir=\"/info\", parent_level=None, selector=None\n ):\n if parent_level is None:\n parent_level = prms._cellpyfile_root\n\n raw_dir = prms._cellpyfile_raw\n step_dir = prms._cellpyfile_step\n summary_dir = prms._cellpyfile_summary\n fid_dir = prms._cellpyfile_fid\n\n logging.debug(f\"filename: {filename}\")\n logging.debug(f\"selector: {selector}\")\n with pd.HDFStore(filename) as store:\n data, meta_table = self._create_initial_data_set_from_cellpy_file(\n meta_dir, parent_level, store\n )\n self._check_keys_in_cellpy_file(\n meta_dir, parent_level, raw_dir, store, summary_dir\n )\n self._extract_summary_from_cellpy_file(\n data, parent_level, store, summary_dir, selector=selector\n )\n self._extract_raw_from_cellpy_file(\n data, parent_level, raw_dir, store, selector=selector\n )\n self._extract_steps_from_cellpy_file(\n data, parent_level, step_dir, store, selector=selector\n )\n fid_table, fid_table_selected = self._extract_fids_from_cellpy_file(\n fid_dir, parent_level, store\n )\n\n self._extract_meta_from_cellpy_file(data, meta_table, filename)\n\n if fid_table_selected:\n (data.raw_data_files, data.raw_data_files_length,) = self._convert2fid_list(\n fid_table\n )\n else:\n data.raw_data_files = []\n data.raw_data_files_length = []\n # this does not yet allow multiple sets\n new_tests = [\n data\n ] # but cellpy is ready when that time comes (if it ever happens)\n return new_tests\n\n def _load_hdf5_v5(self, filename, selector=None):\n parent_level = \"CellpyData\"\n raw_dir = \"/raw\"\n step_dir = \"/steps\"\n summary_dir = \"/summary\"\n fid_dir = 
\"/fid\"\n meta_dir = \"/info\"\n\n with pd.HDFStore(filename) as store:\n data, meta_table = self._create_initial_data_set_from_cellpy_file(\n meta_dir, parent_level, store\n )\n self._check_keys_in_cellpy_file(\n meta_dir, parent_level, raw_dir, store, summary_dir\n )\n self._extract_summary_from_cellpy_file(\n data, parent_level, store, summary_dir, selector=selector\n )\n self._extract_raw_from_cellpy_file(\n data, parent_level, raw_dir, store, selector=selector\n )\n self._extract_steps_from_cellpy_file(\n data, parent_level, step_dir, store, selector=selector\n )\n fid_table, fid_table_selected = self._extract_fids_from_cellpy_file(\n fid_dir, parent_level, store\n )\n\n self._extract_meta_from_cellpy_file(data, meta_table, filename)\n\n if fid_table_selected:\n (data.raw_data_files, data.raw_data_files_length,) = self._convert2fid_list(\n fid_table\n )\n else:\n data.raw_data_files = []\n data.raw_data_files_length = []\n\n # this does not yet allow multiple sets\n logging.debug(\"loaded new test\")\n new_tests = [\n data\n ] # but cellpy is ready when that time comes (if it ever happens)\n return new_tests\n\n def _load_old_hdf5(self, filename, cellpy_file_version):\n if cellpy_file_version < 5:\n new_data = self._load_old_hdf5_v3_to_v4(filename)\n elif cellpy_file_version == 5:\n new_data = self._load_hdf5_v5(filename)\n else:\n raise WrongFileVersion(f\"version {cellpy_file_version} is not supported\")\n\n if cellpy_file_version < 6:\n logging.debug(\"legacy cellpy file version needs translation\")\n new_data = old_settings.translate_headers(new_data, cellpy_file_version)\n # self.__check_loaded_data(new_data)\n return new_data\n\n def __check_loaded_data(self, new_data):\n print(\"Checking loaded data\".center(80, \"=\"))\n print(\"file names:\")\n print(self.file_names)\n print(\"new data sets:\")\n print(len(new_data))\n print(\"first data set:\")\n first = new_data[0]\n print(first)\n\n def _load_old_hdf5_v3_to_v4(self, filename):\n parent_level = \"CellpyData\"\n meta_dir = \"/info\"\n _raw_dir = \"/dfdata\"\n _step_dir = \"/step_table\"\n _summary_dir = \"/dfsummary\"\n _fid_dir = \"/fidtable\"\n\n with pd.HDFStore(filename) as store:\n data, meta_table = self._create_initial_data_set_from_cellpy_file(\n meta_dir, parent_level, store\n )\n\n self._check_keys_in_cellpy_file(\n meta_dir, parent_level, _raw_dir, store, _summary_dir\n )\n self._extract_summary_from_cellpy_file(data, parent_level, store, _summary_dir)\n self._extract_raw_from_cellpy_file(data, parent_level, _raw_dir, store)\n self._extract_steps_from_cellpy_file(data, parent_level, _step_dir, store)\n fid_table, fid_table_selected = self._extract_fids_from_cellpy_file(\n _fid_dir, parent_level, store\n )\n self._extract_meta_from_cellpy_file(data, meta_table, filename)\n warnings.warn(\n \"Loaded old cellpy-file version (<5). \" \"Please update and save again.\"\n )\n if fid_table_selected:\n (data.raw_data_files, data.raw_data_files_length,) = self._convert2fid_list(\n fid_table\n )\n else:\n data.raw_data_files = []\n data.raw_data_files_length = []\n\n new_tests = [data]\n return new_tests\n\n def _create_initial_data_set_from_cellpy_file(self, meta_dir, parent_level, store):\n # Remark that this function is run before selecting loading method\n # based on version. If you change the meta_dir prm to something else than\n # \"/info\" it will most likely fail.\n # Remark! 
Used for versions 3, 4, 5\n\n data = Cell()\n meta_table = None\n\n try:\n meta_table = store.select(parent_level + meta_dir)\n except KeyError as e:\n logging.info(\"This file is VERY old - no info given here\")\n logging.info(\"You should convert the files to a newer version!\")\n logging.debug(e)\n return data, meta_table\n\n try:\n data.cellpy_file_version = self._extract_from_dict(\n meta_table, \"cellpy_file_version\"\n )\n except Exception as e:\n data.cellpy_file_version = 0\n warnings.warn(f\"Unhandled exception raised: {e}\")\n return data, meta_table\n\n logging.debug(f\"cellpy file version. {data.cellpy_file_version}\")\n return data, meta_table\n\n def _check_keys_in_cellpy_file(\n self, meta_dir, parent_level, raw_dir, store, summary_dir\n ):\n required_keys = [raw_dir, summary_dir, meta_dir]\n required_keys = [\"/\" + parent_level + _ for _ in required_keys]\n for key in required_keys:\n if key not in store.keys():\n logging.info(\n f\"This cellpy-file is not good enough - \"\n f\"at least one key is missing: {key}\"\n )\n raise Exception(\n f\"OH MY GOD! At least one crucial key is missing {key}!\"\n )\n logging.debug(f\"Keys in current cellpy-file: {store.keys()}\")\n\n def _hdf5_cycle_filter(self, table=None):\n # this is not the best way to do it\n if max_cycle := self.limit_loaded_cycles:\n if table == \"summary\":\n logging.debug(f\"limited to cycle_number {max_cycle}\")\n return (f\"index <= {int(max_cycle)}\",)\n elif table == \"raw\":\n logging.debug(f\"limited to data_point {self.limit_data_points}\")\n return (f\"index <= {int(self.limit_data_points)}\",)\n\n def _unpack_selector(self, selector):\n # not implemented yet\n # should be used for trimming the selector so that it is not necessary to parse it individually\n # for all the _extract_xxx_from_cellpy_file methods.\n return selector\n\n def _extract_summary_from_cellpy_file(\n self, data, parent_level, store, summary_dir, selector=None\n ):\n if selector is not None:\n cycle_filter = []\n if max_cycle := selector.get(\"max_cycle\", None):\n cycle_filter.append(f\"index <= {int(max_cycle)}\")\n self.limit_loaded_cycles = max_cycle\n else:\n # getting cycle filter by setting attributes:\n cycle_filter = self._hdf5_cycle_filter(\"summary\")\n\n data.summary = store.select(parent_level + summary_dir, where=cycle_filter)\n\n # TODO: max data point should be an attribute\n max_data_point = data.summary[\"data_point\"].max()\n self.limit_data_points = int(max_data_point)\n logging.debug(f\"data-point max limit: {self.limit_data_points}\")\n\n def _extract_raw_from_cellpy_file(\n self, data, parent_level, raw_dir, store, selector=None\n ):\n # selector is not implemented yet for only raw data\n # however, selector for max_cycle will still work since\n # the attribute self.limit_data_points is set while reading the summary\n cycle_filter = self._hdf5_cycle_filter(table=\"raw\")\n data.raw = store.select(parent_level + raw_dir, where=cycle_filter)\n\n def _extract_steps_from_cellpy_file(\n self, data, parent_level, step_dir, store, selector=None\n ):\n try:\n data.steps = store.select(parent_level + step_dir)\n if self.limit_data_points:\n data.steps = data.steps.loc[\n data.steps[\"point_last\"] <= self.limit_data_points\n ]\n logging.debug(f\"limited to data_point {self.limit_data_points}\")\n except Exception as e:\n print(e)\n logging.debug(\"could not get steps from cellpy-file\")\n data.steps = pd.DataFrame()\n warnings.warn(f\"Unhandled exception raised: {e}\")\n\n def _extract_fids_from_cellpy_file(self, 
fid_dir, parent_level, store):\n logging.debug(f\"Extracting fid table from {fid_dir} in hdf5 store\")\n try:\n fid_table = store.select(\n parent_level + fid_dir\n ) # remark! changed spelling from\n # lower letter to camel-case!\n fid_table_selected = True\n except Exception as e:\n logging.debug(e)\n logging.debug(\"could not get fid from cellpy-file\")\n fid_table = []\n warnings.warn(\"no fid_table - you should update your cellpy-file\")\n fid_table_selected = False\n return fid_table, fid_table_selected\n\n def _extract_meta_from_cellpy_file(self, data, meta_table, filename):\n # get attributes from meta table\n # remark! could also utilise the pandas to dictionary method directly\n # for example: meta_table.T.to_dict()\n # Maybe a good task for someone who would like to learn more about\n # how cellpy works..\n\n for attribute in ATTRS_CELLPYFILE:\n value = self._extract_from_dict(meta_table, attribute)\n # some fixes due to errors propagated into the cellpy-files\n if attribute == \"creator\":\n if not isinstance(value, str):\n value = \"no_name\"\n\n if attribute == \"test_no\":\n if not isinstance(value, (int, float)):\n value = 0\n\n setattr(data, attribute, value)\n\n if data.mass is None:\n data.mass = 1.0\n else:\n data.mass_given = True\n\n if data.cycle_mode is None:\n logging.critical(\"cycle mode not found\")\n\n data.loaded_from = str(filename)\n\n # hack to allow the renaming of tests to datasets\n try:\n name = self._extract_from_dict_hard(meta_table, \"name\")\n if not isinstance(name, str):\n name = \"no_name\"\n data.name = name\n\n except KeyError:\n logging.debug(f\"missing key in meta table: {name}\")\n print(meta_table)\n warnings.warn(\"OLD-TYPE: Recommend to save in new format!\")\n try:\n name = self._extract_from_dict(meta_table, \"test_name\")\n except Exception as e:\n name = \"no_name\"\n logging.debug(\"name set to 'no_name\")\n warnings.warn(f\"Unhandled exception raised: {e}\")\n data.name = name\n\n # unpacking the raw data limits\n for key in data.raw_limits:\n try:\n data.raw_limits[key] = self._extract_from_dict_hard(meta_table, key)\n except KeyError:\n logging.debug(f\"missing key in meta_table: {key}\")\n warnings.warn(\"OLD-TYPE: Recommend to save in new format!\")\n\n @staticmethod\n def _extract_from_dict(t, x, default_value=None):\n try:\n value = t[x].values\n if value:\n value = value[0]\n except KeyError:\n value = default_value\n return value\n\n @staticmethod\n def _extract_from_dict_hard(t, x):\n value = t[x].values\n if value:\n value = value[0]\n return value\n\n def _create_infotable(self, dataset_number=None):\n # needed for saving class/DataSet to hdf5\n\n dataset_number = self._validate_dataset_number(dataset_number)\n if dataset_number is None:\n self._report_empty_dataset()\n return\n\n test = self.get_cell(dataset_number)\n\n infotable = collections.OrderedDict()\n\n for attribute in ATTRS_CELLPYFILE:\n value = getattr(test, attribute)\n infotable[attribute] = [value]\n\n infotable[\"cellpy_file_version\"] = [CELLPY_FILE_VERSION]\n infotable[\"cycle_mode\"] = [self.cycle_mode]\n\n limits = test.raw_limits\n for key in limits:\n infotable[key] = limits[key]\n\n infotable = pd.DataFrame(infotable)\n\n logging.debug(\"_create_infotable: fid\")\n fidtable = collections.OrderedDict()\n fidtable[\"raw_data_name\"] = []\n fidtable[\"raw_data_full_name\"] = []\n fidtable[\"raw_data_size\"] = []\n fidtable[\"raw_data_last_modified\"] = []\n fidtable[\"raw_data_last_accessed\"] = []\n fidtable[\"raw_data_last_info_changed\"] = []\n 
fidtable[\"raw_data_location\"] = []\n fidtable[\"raw_data_files_length\"] = []\n fidtable[\"last_data_point\"] = []\n fids = test.raw_data_files\n fidtable[\"raw_data_fid\"] = fids\n if fids:\n for fid, length in zip(fids, test.raw_data_files_length):\n try:\n fidtable[\"raw_data_name\"].append(str(Path(fid.name).name))\n fidtable[\"raw_data_full_name\"].append(str(Path(fid.full_name)))\n fidtable[\"raw_data_size\"].append(fid.size)\n fidtable[\"raw_data_last_modified\"].append(fid.last_modified)\n fidtable[\"raw_data_last_accessed\"].append(fid.last_accessed)\n fidtable[\"raw_data_last_info_changed\"].append(fid.last_info_changed)\n except:\n logging.debug(\"this is probably not from a file\")\n fidtable[\"raw_data_name\"].append(\"db\")\n fidtable[\"raw_data_full_name\"].append(\"db\")\n fidtable[\"raw_data_size\"].append(fid.size)\n fidtable[\"raw_data_last_modified\"].append(\"db\")\n fidtable[\"raw_data_last_accessed\"].append(\"db\")\n fidtable[\"raw_data_last_info_changed\"].append(\"db\")\n\n fidtable[\"raw_data_location\"].append(fid.location)\n fidtable[\"raw_data_files_length\"].append(length)\n fidtable[\"last_data_point\"].append(fid.last_data_point)\n else:\n warnings.warn(\"seems you lost info about your raw-data (missing fids)\")\n fidtable = pd.DataFrame(fidtable)\n return infotable, fidtable\n\n def _convert2fid_list(self, tbl):\n logging.debug(\"converting loaded fidtable to FileID object\")\n fids = []\n lengths = []\n min_amount = 0\n for counter, item in enumerate(tbl[\"raw_data_name\"]):\n fid = FileID()\n try:\n fid.name = Path(item).name\n except NotImplementedError:\n fid.name = os.path.basename(item)\n fid.full_name = tbl[\"raw_data_full_name\"][counter]\n fid.size = tbl[\"raw_data_size\"][counter]\n fid.last_modified = tbl[\"raw_data_last_modified\"][counter]\n fid.last_accessed = tbl[\"raw_data_last_accessed\"][counter]\n fid.last_info_changed = tbl[\"raw_data_last_info_changed\"][counter]\n fid.location = tbl[\"raw_data_location\"][counter]\n length = tbl[\"raw_data_files_length\"][counter]\n if \"last_data_point\" in tbl.columns:\n fid.last_data_point = tbl[\"last_data_point\"][counter]\n else:\n fid.last_data_point = 0\n fids.append(fid)\n lengths.append(length)\n min_amount = 1\n if min_amount < 1:\n logging.debug(\"info about raw files missing\")\n return fids, lengths\n\n def merge(self, datasets=None, separate_datasets=False):\n \"\"\"This function merges datasets into one set.\"\"\"\n\n logging.info(\"Merging\")\n if separate_datasets:\n warnings.warn(\n \"The option separate_datasets=True is\"\n \"not implemented yet. Performing merging, but\"\n \"neglecting the option.\"\n )\n else:\n if datasets is None:\n datasets = list(range(len(self.cells)))\n first = True\n for dataset_number in datasets:\n if first:\n dataset = self.cells[dataset_number]\n first = False\n else:\n dataset = self._append(dataset, self.cells[dataset_number])\n for raw_data_file, file_size in zip(\n self.cells[dataset_number].raw_data_files,\n self.cells[dataset_number].raw_data_files_length,\n ):\n dataset.raw_data_files.append(raw_data_file)\n dataset.raw_data_files_length.append(file_size)\n self.cells = [dataset]\n self.number_of_datasets = 1\n return self\n\n def _append(self, t1, t2, merge_summary=True, merge_step_table=True):\n logging.debug(\n f\"merging two datasets\\n(merge summary = {merge_summary})\\n\"\n f\"(merge step table = {merge_step_table})\"\n )\n if t1.raw.empty:\n logging.debug(\"OBS! 
the first dataset is empty\")\n\n if t2.raw.empty:\n t1.merged = True\n logging.debug(\"the second dataset was empty\")\n logging.debug(\" -> merged contains only first\")\n return t1\n test = t1\n # finding diff of time\n start_time_1 = t1.start_datetime\n start_time_2 = t2.start_datetime\n if self.tester in [\"arbin\", \"arbin_res\"]:\n diff_time = xldate_as_datetime(start_time_2) - xldate_as_datetime(\n start_time_1\n )\n else:\n diff_time = start_time_2 - start_time_1\n diff_time = diff_time.total_seconds()\n\n if diff_time < 0:\n logging.warning(\"Wow! your new dataset is older than the old!\")\n logging.debug(f\"diff time: {diff_time}\")\n\n sort_key = self.headers_normal.datetime_txt # DateTime\n # mod data points for set 2\n data_point_header = self.headers_normal.data_point_txt\n try:\n last_data_point = max(t1.raw[data_point_header])\n except ValueError:\n logging.debug(\"ValueError when getting last data point for r1\")\n last_data_point = 0\n\n t2.raw[data_point_header] = t2.raw[data_point_header] + last_data_point\n logging.debug(\"No error getting last data point for r2\")\n # mod cycle index for set 2\n cycle_index_header = self.headers_summary.cycle_index\n try:\n last_cycle = max(t1.raw[cycle_index_header])\n except ValueError:\n logging.debug(\"ValueError when getting last cycle index for r1\")\n last_cycle = 0\n t2.raw[cycle_index_header] = t2.raw[cycle_index_header] + last_cycle\n # mod test time for set 2\n test_time_header = self.headers_normal.test_time_txt\n t2.raw[test_time_header] = t2.raw[test_time_header] + diff_time\n # merging\n if not t1.raw.empty:\n logging.debug(\"r1 is not empty - performing concat\")\n raw2 = pd.concat([t1.raw, t2.raw], ignore_index=True)\n\n # checking if we already have made a summary file of these datasets\n # (to be used if merging summaries (but not properly implemented yet))\n if t1.summary.empty or t2.summary.empty:\n summary_made = False\n else:\n summary_made = True\n\n try:\n _ = t1.summary[\n cycle_index_header\n ] # during loading arbin res files, a stats-frame is loaded into\n _ = t2.summary[\n cycle_index_header\n ] # the summary. This prevents merging those.\n except KeyError:\n summary_made = False\n logging.info(\"The summary is not complete - run make_summary()\")\n\n # checking if we already have made step tables for these datasets\n if t1.steps_made and t2.steps_made:\n step_table_made = True\n else:\n step_table_made = False\n\n if merge_summary and summary_made:\n # check if (self-made) summary exists.\n logging.debug(\"merge summaries\")\n\n # This part of the code is seldom ran. 
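It is only reached when merging summaries and both datasets already carry self-made summaries (merge_summary and summary_made are both True). 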
Careful!\n # mod cycle index for set 2\n last_cycle = max(t1.summary[cycle_index_header])\n t2.summary[cycle_index_header] = (\n t2.summary[cycle_index_header] + last_cycle\n )\n # mod test time for set 2\n t2.summary[test_time_header] = t2.summary[test_time_header] + diff_time\n # to-do: mod all the cumsum stuff in the summary (best to make\n # summary after merging) merging\n\n t2.summary[data_point_header] = (\n t2.summary[data_point_header] + last_data_point\n )\n\n summary2 = pd.concat([t1.summary, t2.summary], ignore_index=True)\n\n test.summary = summary2\n else:\n logging.debug(\n \"could not merge summary tables \"\n \"(non-existing) -\"\n \"create them first!\"\n )\n\n if merge_step_table:\n if step_table_made:\n cycle_index_header = self.headers_normal.cycle_index_txt\n t2.steps[self.headers_step_table.cycle] = (\n t2.raw[self.headers_step_table.cycle] + last_cycle\n )\n\n steps2 = pd.concat([t1.steps, t2.steps], ignore_index=True)\n test.steps = steps2\n else:\n logging.debug(\n \"could not merge step tables \"\n \"(non-existing) -\"\n \"create them first!\"\n )\n\n test.no_cycles = max(raw2[cycle_index_header])\n test.raw = raw2\n else:\n test.no_cycles = max(t2.raw[cycle_index_header])\n test = t2\n test.merged = True\n logging.debug(\" -> merged with new dataset\")\n # TODO: @jepe - update merging for more variables\n return test\n\n # --------------iterate-and-find-in-data-----------------------------------\n # TODO: make this obsolete (somehow)\n def _validate_dataset_number(self, n, check_for_empty=True):\n # Returns dataset_number (or None if empty)\n # Remark! _is_not_empty_dataset returns True or False\n\n if not len(self.cells):\n logging.info(\n \"Can't see any datasets! Are you sure you have \" \"loaded anything?\"\n )\n return\n\n if n is not None:\n v = n\n else:\n if self.selected_cell_number is None:\n v = 0\n else:\n v = self.selected_cell_number\n\n if check_for_empty:\n not_empty = self._is_not_empty_dataset(self.cells[v])\n if not_empty:\n return v\n else:\n return None\n else:\n return v\n\n # TODO: check if this can be moved to helpers\n def _validate_step_table(self, dataset_number=None, simple=False):\n dataset_number = self._validate_dataset_number(dataset_number)\n if dataset_number is None:\n self._report_empty_dataset()\n return\n\n step_index_header = self.headers_normal.step_index_txt\n logging.debug(\"-validating step table\")\n d = self.cells[dataset_number].raw\n s = self.cells[dataset_number].steps\n\n if not self.cells[dataset_number].steps_made:\n return False\n\n no_cycles_raw = np.amax(d[self.headers_normal.cycle_index_txt])\n headers_step_table = self.headers_step_table\n no_cycles_step_table = np.amax(s[headers_step_table.cycle])\n\n if simple:\n logging.debug(\" (simple)\")\n if no_cycles_raw == no_cycles_step_table:\n return True\n else:\n return False\n\n else:\n validated = True\n if no_cycles_raw != no_cycles_step_table:\n logging.debug(\" differ in no. 
of cycles\")\n validated = False\n else:\n for j in range(1, no_cycles_raw + 1):\n cycle_number = j\n no_steps_raw = len(\n np.unique(\n d.loc[\n d[self.headers_normal.cycle_index_txt] == cycle_number,\n self.headers_normal.step_index_txt,\n ]\n )\n )\n no_steps_step_table = len(\n s.loc[\n s[headers_step_table.cycle] == cycle_number,\n headers_step_table.step,\n ]\n )\n if no_steps_raw != no_steps_step_table:\n validated = False\n # txt = (\"Error in step table \"\n # \"(cycle: %i) d: %i, s:%i)\" % (\n # cycle_number,\n # no_steps_raw,\n # no_steps_steps\n # )\n # )\n #\n # logging.debug(txt)\n return validated\n\n def print_steps(self, dataset_number=None):\n \"\"\"Print the step table.\"\"\"\n dataset_number = self._validate_dataset_number(dataset_number)\n if dataset_number is None:\n self._report_empty_dataset()\n return\n st = self.cells[dataset_number].steps\n print(st)\n\n def get_step_numbers(\n self,\n steptype=\"charge\",\n allctypes=True,\n pdtype=False,\n cycle_number=None,\n dataset_number=None,\n trim_taper_steps=None,\n steps_to_skip=None,\n steptable=None,\n ):\n # TODO: @jepe - include sub_steps here\n # TODO: @jepe - include option for not selecting taper steps here\n \"\"\"Get the step numbers of selected type.\n\n Returns the selected step_numbers for the selected type of step(s).\n\n Args:\n steptype (string): string identifying type of step.\n allctypes (bool): get all types of charge (or discharge).\n pdtype (bool): return results as pandas.DataFrame\n cycle_number (int): selected cycle, selects all if not set.\n dataset_number (int): test number (default first)\n (usually not used).\n trim_taper_steps (integer): number of taper steps to skip (counted\n from the end, i.e. 1 means skip last step in each cycle).\n steps_to_skip (list): step numbers that should not be included.\n steptable (pandas.DataFrame): optional steptable\n\n Returns:\n A dictionary containing a list of step numbers corresponding\n to the selected steptype for the cycle(s).\n Returns a pandas.DataFrame instead of a dict of lists if pdtype is\n set to True. The frame is a sub-set of the step-table frame\n (i.e. all the same columns, only filtered by rows).\n\n Example:\n >>> my_charge_steps = CellpyData.get_step_numbers(\n >>> \"charge\",\n >>> cycle_number = 3\n >>> )\n >>> print my_charge_steps\n {3: [5,8]}\n\n \"\"\"\n t0 = time.time()\n # logging.debug(\"Trying to get step-types\")\n if steps_to_skip is None:\n steps_to_skip = []\n\n if steptable is None:\n dataset_number = self._validate_dataset_number(dataset_number)\n # logging.debug(f\"dt 1: {time.time() - t0}\")\n if dataset_number is None:\n self._report_empty_dataset()\n return\n\n if not self.cells[dataset_number].steps_made:\n logging.debug(\"steps is not made\")\n\n if self.force_step_table_creation or self.force_all:\n logging.debug(\"creating step_table for\")\n logging.debug(self.cells[dataset_number].loaded_from)\n self.make_step_table(dataset_number=dataset_number)\n\n else:\n logging.info(\"ERROR! 
Cannot use get_steps: create step_table first\")\n logging.info(\"You could use find_step_numbers method instead\")\n logging.info(\"(but I don't recommend it)\")\n return None\n\n # check if steptype is valid\n steptype = steptype.lower()\n steptypes = []\n helper_step_types = [\"ocv\", \"charge_discharge\"]\n valid_step_type = True\n # logging.debug(f\"dt 2: {time.time() - t0}\")\n if steptype in self.list_of_step_types:\n steptypes.append(steptype)\n else:\n txt = \"%s is not a valid core steptype\" % steptype\n if steptype in helper_step_types:\n txt = \"but a helper steptype\"\n if steptype == \"ocv\":\n steptypes.append(\"ocvrlx_up\")\n steptypes.append(\"ocvrlx_down\")\n elif steptype == \"charge_discharge\":\n steptypes.append(\"charge\")\n steptypes.append(\"discharge\")\n else:\n valid_step_type = False\n # logging.debug(txt)\n if not valid_step_type:\n return None\n\n # in case of selection allctypes, then modify charge, discharge\n if allctypes:\n add_these = []\n for st in steptypes:\n if st in [\"charge\", \"discharge\"]:\n st1 = st + \"_cv\"\n add_these.append(st1)\n st1 = \"cv_\" + st\n add_these.append(st1)\n for st in add_these:\n steptypes.append(st)\n\n # logging.debug(\"Your steptypes:\")\n # logging.debug(steptypes)\n\n if steptable is None:\n st = self.cells[dataset_number].steps\n else:\n st = steptable\n shdr = self.headers_step_table\n\n # retrieving cycle numbers\n # logging.debug(f\"dt 3: {time.time() - t0}\")\n if cycle_number is None:\n cycle_numbers = self.get_cycle_numbers(dataset_number, steptable=steptable)\n else:\n if isinstance(cycle_number, collections.abc.Iterable):\n cycle_numbers = cycle_number\n else:\n cycle_numbers = [cycle_number]\n\n if trim_taper_steps is not None:\n trim_taper_steps = -trim_taper_steps\n # logging.debug(\"taper steps to trim given\")\n\n if pdtype:\n # logging.debug(\"Return pandas dataframe.\")\n if trim_taper_steps:\n logging.info(\n \"Trimming taper steps is currently not\"\n \"possible when returning pd.DataFrame. \"\n \"Do it manually insteaD.\"\n )\n out = st[st[shdr.type].isin(steptypes) & st[shdr.cycle].isin(cycle_numbers)]\n return out\n\n # if not pdtype, return a dict instead\n # logging.debug(\"out as dict; out[cycle] = [s1,s2,...]\")\n # logging.debug(\"(same behaviour as find_step_numbers)\")\n # logging.debug(\"return dict of lists\")\n # logging.warning(\n # \"returning dict will be deprecated\",\n # )\n out = dict()\n # logging.debug(f\"return a dict\")\n # logging.debug(f\"dt 4: {time.time() - t0}\")\n for cycle in cycle_numbers:\n steplist = []\n for s in steptypes:\n mask_type_and_cycle = (st[shdr.type] == s) & (st[shdr.cycle] == cycle)\n if not any(mask_type_and_cycle):\n logging.debug(f\"found nothing for cycle {cycle}\")\n else:\n step = st[mask_type_and_cycle][shdr.step].tolist()\n for newstep in step[:trim_taper_steps]:\n if newstep in steps_to_skip:\n logging.debug(f\"skipping step {newstep}\")\n else:\n steplist.append(int(newstep))\n\n if not steplist:\n steplist = [0]\n out[cycle] = steplist\n # logging.debug(f\"dt tot: {time.time() - t0}\")\n return out\n\n def load_step_specifications(self, file_name, short=False, dataset_number=None):\n \"\"\" Load a table that contains step-type definitions.\n\n This function loads a file containing a specification for each step or\n for each (cycle_number, step_number) combinations if short==False. 
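The file is read with pandas.read_csv using prms.Reader.sep as the separator and must contain the columns 'step' and 'type', plus 'cycle' unless short is True. 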
The\n step_cycle specifications that are allowed are stored in the variable\n cellreader.list_of_step_types.\n \"\"\"\n\n dataset_number = self._validate_dataset_number(dataset_number)\n if dataset_number is None:\n self._report_empty_dataset()\n return\n\n # if short:\n # # the table only consists of steps (not cycle,step pairs) assuming\n # # that the step numbers uniquely defines step type (this is true\n # # for arbin at least).\n # raise NotImplementedError\n\n step_specs = pd.read_csv(file_name, sep=prms.Reader.sep)\n if \"step\" not in step_specs.columns:\n logging.info(\"Missing column: step\")\n raise IOError\n\n if \"type\" not in step_specs.columns:\n logging.info(\"Missing column: type\")\n raise IOError\n\n if not short and \"cycle\" not in step_specs.columns:\n logging.info(\"Missing column: cycle\")\n raise IOError\n\n self.make_step_table(step_specifications=step_specs, short=short)\n\n def _sort_data(self, dataset):\n # TODO: [# index]\n if self.headers_normal.data_point_txt in dataset.raw.columns:\n dataset.raw = dataset.raw.sort_values(\n self.headers_normal.data_point_txt\n ).reset_index()\n return dataset\n\n logging.debug(\"_sort_data: no datapoint header to sort by\")\n\n def _ustep(self, n):\n un = []\n c = 0\n n = n.diff()\n for i in n:\n if i != 0:\n c += 1\n un.append(c)\n logging.debug(\"created u-steps\")\n return un\n\n def make_step_table(\n self,\n step_specifications=None,\n short=False,\n profiling=False,\n all_steps=False,\n add_c_rate=True,\n skip_steps=None,\n sort_rows=True,\n dataset_number=None,\n from_data_point=None,\n ):\n\n \"\"\" Create a table (v.4) that contains summary information for each step.\n\n This function creates a table containing information about the\n different steps for each cycle and, based on that, decides what type of\n step it is (e.g. charge) for each cycle.\n\n The format of the steps is:\n\n index: cycleno - stepno - sub-step-no - ustep\n Time info (average, stdev, max, min, start, end, delta) -\n Logging info (average, stdev, max, min, start, end, delta) -\n Current info (average, stdev, max, min, start, end, delta) -\n Voltage info (average, stdev, max, min, start, end, delta) -\n Type (from pre-defined list) - SubType -\n Info\n\n Args:\n step_specifications (pandas.DataFrame): step specifications\n short (bool): step specifications in short format\n profiling (bool): turn on profiling\n\n all_steps (bool): investigate all steps including same steps within\n one cycle (this is useful for e.g. 
GITT).\n add_c_rate (bool): include a C-rate estimate in the steps\n skip_steps (list of integers): list of step numbers that should not\n be processed (future feature - not used yet).\n sort_rows (bool): sort the rows after processing.\n dataset_number: defaults to self.dataset_number\n from_data_point (int): first data point to use\n\n Returns:\n None\n \"\"\"\n # TODO: @jepe - include option for omitting steps\n # TODO: @jepe - make it is possible to update only new data\n\n time_00 = time.time()\n dataset_number = self._validate_dataset_number(dataset_number)\n if dataset_number is None:\n self._report_empty_dataset()\n return\n\n if profiling:\n print(\"PROFILING MAKE_STEP_TABLE\".center(80, \"=\"))\n\n def first(x):\n return x.iloc[0]\n\n def last(x):\n return x.iloc[-1]\n\n def delta(x):\n if x.iloc[0] == 0.0:\n # starts from a zero value\n difference = 100.0 * x.iloc[-1]\n else:\n difference = (x.iloc[-1] - x.iloc[0]) * 100 / abs(x.iloc[0])\n\n return difference\n\n nhdr = self.headers_normal\n shdr = self.headers_step_table\n\n if from_data_point is not None:\n df = self.cells[dataset_number].raw.loc[\n self.cells[dataset_number].raw[nhdr.data_point_txt] >= from_data_point\n ]\n else:\n df = self.cells[dataset_number].raw\n # df[shdr.internal_resistance_change] = \\\n # df[nhdr.internal_resistance_txt].pct_change()\n\n # selecting only the most important columns from raw:\n keep = [\n nhdr.data_point_txt,\n nhdr.test_time_txt,\n nhdr.step_time_txt,\n nhdr.step_index_txt,\n nhdr.cycle_index_txt,\n nhdr.current_txt,\n nhdr.voltage_txt,\n nhdr.ref_voltage_txt,\n nhdr.charge_capacity_txt,\n nhdr.discharge_capacity_txt,\n nhdr.internal_resistance_txt,\n # \"ir_pct_change\"\n ]\n\n # only use col-names that exist:\n keep = [col for col in keep if col in df.columns]\n df = df[keep]\n # preparing for implementation of sub_steps (will come in the future):\n df[nhdr.sub_step_index_txt] = 1\n\n # using headers as defined in the internal_settings.py file\n rename_dict = {\n nhdr.cycle_index_txt: shdr.cycle,\n nhdr.step_index_txt: shdr.step,\n nhdr.sub_step_index_txt: shdr.sub_step,\n nhdr.data_point_txt: shdr.point,\n nhdr.test_time_txt: shdr.test_time,\n nhdr.step_time_txt: shdr.step_time,\n nhdr.current_txt: shdr.current,\n nhdr.voltage_txt: shdr.voltage,\n nhdr.charge_capacity_txt: shdr.charge,\n nhdr.discharge_capacity_txt: shdr.discharge,\n nhdr.internal_resistance_txt: shdr.internal_resistance,\n }\n\n df = df.rename(columns=rename_dict)\n by = [shdr.cycle, shdr.step, shdr.sub_step]\n\n if skip_steps is not None:\n logging.debug(f\"omitting steps {skip_steps}\")\n df = df.loc[~df[shdr.step].isin(skip_steps)]\n\n if all_steps:\n by.append(shdr.ustep)\n df[shdr.ustep] = self._ustep(df[shdr.step])\n\n logging.debug(f\"groupby: {by}\")\n\n if profiling:\n time_01 = time.time()\n\n # TODO: make sure that all columns are nummeric\n\n gf = df.groupby(by=by)\n df_steps = gf.agg(\n [np.mean, np.std, np.amin, np.amax, first, last, delta]\n ).rename(columns={\"amin\": \"min\", \"amax\": \"max\", \"mean\": \"avr\"})\n\n df_steps = df_steps.reset_index()\n\n if profiling:\n print(f\"*** groupby-agg: {time.time() - time_01} s\")\n time_01 = time.time()\n\n # new cols\n\n # column with C-rates:\n if add_c_rate:\n nom_cap = self.cells[dataset_number].nom_cap\n mass = self.cells[dataset_number].mass\n spec_conv_factor = self.get_converter_to_specific()\n logging.debug(f\"c-rate: nom_cap={nom_cap} spec_conv={spec_conv_factor}\")\n\n df_steps[shdr.rate_avr] = abs(\n round(\n df_steps.loc[:, (shdr.current, 
\"avr\")]\n / (nom_cap / spec_conv_factor),\n 2,\n )\n )\n\n df_steps[shdr.type] = np.nan\n df_steps[shdr.sub_type] = np.nan\n df_steps[shdr.info] = np.nan\n\n if step_specifications is None:\n current_limit_value_hard = self.raw_limits[\"current_hard\"]\n current_limit_value_soft = self.raw_limits[\"current_soft\"]\n stable_current_limit_hard = self.raw_limits[\"stable_current_hard\"]\n stable_current_limit_soft = self.raw_limits[\"stable_current_soft\"]\n stable_voltage_limit_hard = self.raw_limits[\"stable_voltage_hard\"]\n stable_voltage_limit_soft = self.raw_limits[\"stable_voltage_soft\"]\n stable_charge_limit_hard = self.raw_limits[\"stable_charge_hard\"]\n stable_charge_limit_soft = self.raw_limits[\"stable_charge_soft\"]\n ir_change_limit = self.raw_limits[\"ir_change\"]\n\n mask_no_current_hard = (\n df_steps.loc[:, (shdr.current, \"max\")].abs()\n + df_steps.loc[:, (shdr.current, \"min\")].abs()\n ) < current_limit_value_hard / 2\n\n mask_voltage_down = (\n df_steps.loc[:, (shdr.voltage, \"delta\")] < -stable_voltage_limit_hard\n )\n\n mask_voltage_up = (\n df_steps.loc[:, (shdr.voltage, \"delta\")] > stable_voltage_limit_hard\n )\n\n mask_voltage_stable = (\n df_steps.loc[:, (shdr.voltage, \"delta\")].abs()\n < stable_voltage_limit_hard\n )\n\n mask_current_down = (\n df_steps.loc[:, (shdr.current, \"delta\")] < -stable_current_limit_soft\n )\n\n mask_current_up = (\n df_steps.loc[:, (shdr.current, \"delta\")] > stable_current_limit_soft\n )\n\n mask_current_negative = (\n df_steps.loc[:, (shdr.current, \"avr\")] < -current_limit_value_hard\n )\n\n mask_current_positive = (\n df_steps.loc[:, (shdr.current, \"avr\")] > current_limit_value_hard\n )\n\n mask_galvanostatic = (\n df_steps.loc[:, (shdr.current, \"delta\")].abs()\n < stable_current_limit_soft\n )\n\n mask_charge_changed = (\n df_steps.loc[:, (shdr.charge, \"delta\")].abs() > stable_charge_limit_hard\n )\n\n mask_discharge_changed = (\n df_steps.loc[:, (shdr.discharge, \"delta\")].abs()\n > stable_charge_limit_hard\n )\n\n mask_no_change = (\n (df_steps.loc[:, (shdr.voltage, \"delta\")] == 0)\n & (df_steps.loc[:, (shdr.current, \"delta\")] == 0)\n & (df_steps.loc[:, (shdr.charge, \"delta\")] == 0)\n & (df_steps.loc[:, (shdr.discharge, \"delta\")] == 0)\n )\n\n # TODO: make an option for only checking unique steps\n # e.g.\n # df_x = df_steps.where.steps.are.unique\n\n df_steps.loc[\n mask_no_current_hard & mask_voltage_stable, (shdr.type, slice(None))\n ] = \"rest\"\n\n df_steps.loc[\n mask_no_current_hard & mask_voltage_up, (shdr.type, slice(None))\n ] = \"ocvrlx_up\"\n\n df_steps.loc[\n mask_no_current_hard & mask_voltage_down, (shdr.type, slice(None))\n ] = \"ocvrlx_down\"\n\n df_steps.loc[\n mask_discharge_changed & mask_current_negative, (shdr.type, slice(None))\n ] = \"discharge\"\n\n df_steps.loc[\n mask_charge_changed & mask_current_positive, (shdr.type, slice(None))\n ] = \"charge\"\n\n df_steps.loc[\n mask_voltage_stable & mask_current_negative & mask_current_down,\n (shdr.type, slice(None)),\n ] = \"cv_discharge\"\n\n df_steps.loc[\n mask_voltage_stable & mask_current_positive & mask_current_down,\n (shdr.type, slice(None)),\n ] = \"cv_charge\"\n\n # --- internal resistance ----\n df_steps.loc[mask_no_change, (shdr.type, slice(None))] = \"ir\"\n # assumes that IR is stored in just one row\n\n # --- sub-step-txt -----------\n df_steps[shdr.sub_type] = None\n\n # --- CV steps ----\n\n # \"voltametry_charge\"\n # mask_charge_changed\n # mask_voltage_up\n # (could also include abs-delta-cumsum current)\n\n # 
\"voltametry_discharge\"\n # mask_discharge_changed\n # mask_voltage_down\n\n if profiling:\n print(f\"*** masking: {time.time() - time_01} s\")\n time_01 = time.time()\n\n else:\n logging.debug(\"parsing custom step definition\")\n if not short:\n logging.debug(\"using long format (cycle,step)\")\n for row in step_specifications.itertuples():\n df_steps.loc[\n (df_steps[shdr.step] == row.step)\n & (df_steps[shdr.cycle] == row.cycle),\n (shdr.type, slice(None)),\n ] = row.type\n df_steps.loc[\n (df_steps[shdr.step] == row.step)\n & (df_steps[shdr.cycle] == row.cycle),\n (shdr.info, slice(None)),\n ] = row.info\n else:\n logging.debug(\"using short format (step)\")\n for row in step_specifications.itertuples():\n df_steps.loc[\n df_steps[shdr.step] == row.step, (shdr.type, slice(None))\n ] = row.type\n df_steps.loc[\n df_steps[shdr.step] == row.step, (shdr.info, slice(None))\n ] = row.info\n\n if profiling:\n print(f\"*** introspect: {time.time() - time_01} s\")\n\n # check if all the steps got categorizes\n logging.debug(\"looking for un-categorized steps\")\n empty_rows = df_steps.loc[df_steps[shdr.type].isnull()]\n if not empty_rows.empty:\n logging.warning(\n f\"found {len(empty_rows)}\"\n f\":{len(df_steps)} non-categorized steps \"\n f\"(please, check your raw-limits)\"\n )\n # logging.debug(empty_rows)\n\n # flatten (possible remove in the future),\n # (maybe we will implement mulitindexed tables)\n\n logging.debug(f\"flatten columns\")\n if profiling:\n time_01 = time.time()\n flat_cols = []\n for col in df_steps.columns:\n if isinstance(col, tuple):\n if col[-1]:\n col = \"_\".join(col)\n else:\n col = col[0]\n flat_cols.append(col)\n\n df_steps.columns = flat_cols\n if sort_rows:\n logging.debug(\"sorting the step rows\")\n # TODO: [#index]\n # if this throws a KeyError: 'test_time_first' it probably\n # means that the df contains a non-nummeric 'test_time' column.\n df_steps = df_steps.sort_values(by=shdr.test_time + \"_first\").reset_index()\n\n if profiling:\n print(f\"*** flattening: {time.time() - time_01} s\")\n\n logging.debug(f\"(dt: {(time.time() - time_00):4.2f}s)\")\n\n if from_data_point is not None:\n return df_steps\n else:\n self.cells[dataset_number].steps = df_steps\n return self\n\n def select_steps(self, step_dict, append_df=False, dataset_number=None):\n \"\"\"Select steps (not documented yet).\"\"\"\n raise DeprecatedFeature\n\n def _select_step(self, cycle, step, dataset_number=None):\n # TODO: @jepe - insert sub_step here\n dataset_number = self._validate_dataset_number(dataset_number)\n if dataset_number is None:\n self._report_empty_dataset()\n return\n test = self.cells[dataset_number]\n\n # check if columns exist\n c_txt = self.headers_normal.cycle_index_txt\n s_txt = self.headers_normal.step_index_txt\n y_txt = self.headers_normal.voltage_txt\n x_txt = self.headers_normal.discharge_capacity_txt # jepe fix\n\n # no_cycles=np.amax(test.raw[c_txt])\n # print d.columns\n\n if not any(test.raw.columns == c_txt):\n logging.info(\"ERROR - cannot find %s\" % c_txt)\n sys.exit(-1)\n if not any(test.raw.columns == s_txt):\n logging.info(\"ERROR - cannot find %s\" % s_txt)\n sys.exit(-1)\n\n # logging.debug(f\"selecting cycle {cycle} step {step}\")\n v = test.raw[(test.raw[c_txt] == cycle) & (test.raw[s_txt] == step)]\n\n if self.is_empty(v):\n logging.debug(\"empty dataframe\")\n return None\n else:\n return v\n\n def populate_step_dict(self, step, dataset_number=None):\n \"\"\"Returns a dict with cycle numbers as keys\n and corresponding steps (list) as 
values.\"\"\"\n raise DeprecatedFeature\n\n def _export_cycles(\n self,\n dataset_number,\n setname=None,\n sep=None,\n outname=None,\n shifted=False,\n method=None,\n shift=0.0,\n last_cycle=None,\n ):\n # export voltage - capacity curves to .csv file\n\n logging.debug(\"START exporing cycles\")\n time_00 = time.time()\n lastname = \"_cycles.csv\"\n if sep is None:\n sep = self.sep\n if outname is None:\n outname = setname + lastname\n\n logging.debug(f\"outname: {outname}\")\n\n list_of_cycles = self.get_cycle_numbers(dataset_number=dataset_number)\n if last_cycle is not None:\n list_of_cycles = [c for c in list_of_cycles if c <= int(last_cycle)]\n logging.debug(f\"only processing up to cycle {last_cycle}\")\n logging.debug(f\"you have {len(list_of_cycles)}\" f\"cycles to process\")\n out_data = []\n c = None\n if not method:\n method = \"back-and-forth\"\n if shifted:\n method = \"back-and-forth\"\n shift = 0.0\n _last = 0.0\n logging.debug(f\"number of cycles: {len(list_of_cycles)}\")\n for cycle in list_of_cycles:\n try:\n if shifted and c is not None:\n shift = _last\n # print(f\"shifted = {shift}, first={_first}\")\n df = self.get_cap(\n cycle, dataset_number=dataset_number, method=method, shift=shift\n )\n if df.empty:\n logging.debug(\"NoneType from get_cap\")\n else:\n c = df[\"capacity\"]\n v = df[\"voltage\"]\n\n _last = c.iat[-1]\n _first = c.iat[0]\n\n c = c.tolist()\n v = v.tolist()\n header_x = \"cap cycle_no %i\" % cycle\n header_y = \"voltage cycle_no %i\" % cycle\n c.insert(0, header_x)\n v.insert(0, header_y)\n out_data.append(c)\n out_data.append(v)\n # txt = \"extracted cycle %i\" % cycle\n # logging.debug(txt)\n except IndexError as e:\n txt = \"Could not extract cycle %i\" % cycle\n logging.info(txt)\n logging.debug(e)\n\n # Saving cycles in one .csv file (x,y,x,y,x,y...)\n # print \"saving the file with delimiter '%s' \" % (sep)\n logging.debug(\"writing cycles to file\")\n with open(outname, \"w\", newline=\"\") as f:\n writer = csv.writer(f, delimiter=sep)\n writer.writerows(itertools.zip_longest(*out_data))\n # star (or asterix) means transpose (writing cols instead of rows)\n\n logging.info(f\"The file {outname} was created\")\n logging.debug(f\"(dt: {(time.time() - time_00):4.2f}s)\")\n logging.debug(\"END exporting cycles\")\n\n # TODO: remove this\n def _export_cycles_old(\n self,\n dataset_number,\n setname=None,\n sep=None,\n outname=None,\n shifted=False,\n method=None,\n shift=0.0,\n last_cycle=None,\n ):\n # export voltage - capacity curves to .csv file\n\n logging.debug(\"*** OLD EXPORT-CYCLES METHOD***\")\n lastname = \"_cycles.csv\"\n if sep is None:\n sep = self.sep\n if outname is None:\n outname = setname + lastname\n\n list_of_cycles = self.get_cycle_numbers(dataset_number=dataset_number)\n logging.debug(f\"you have {len(list_of_cycles)} cycles\")\n if last_cycle is not None:\n list_of_cycles = [c for c in list_of_cycles if c <= int(last_cycle)]\n logging.debug(f\"only processing up to cycle {last_cycle}\")\n logging.debug(f\"you have {len(list_of_cycles)}\" f\"cycles to process\")\n out_data = []\n c = None\n if not method:\n method = \"back-and-forth\"\n if shifted:\n method = \"back-and-forth\"\n shift = 0.0\n _last = 0.0\n\n for cycle in list_of_cycles:\n try:\n if shifted and c is not None:\n shift = _last\n # print(f\"shifted = {shift}, first={_first}\")\n c, v = self.get_cap(\n cycle, dataset_number=dataset_number, method=method, shift=shift\n )\n if c is None:\n logging.debug(\"NoneType from get_cap\")\n else:\n _last = c.iat[-1]\n _first 
= c.iat[0]\n\n c = c.tolist()\n v = v.tolist()\n header_x = \"cap cycle_no %i\" % cycle\n header_y = \"voltage cycle_no %i\" % cycle\n c.insert(0, header_x)\n v.insert(0, header_y)\n out_data.append(c)\n out_data.append(v)\n # txt = \"extracted cycle %i\" % cycle\n # logging.debug(txt)\n except IndexError as e:\n txt = \"Could not extract cycle %i\" % cycle\n logging.info(txt)\n logging.debug(e)\n\n # Saving cycles in one .csv file (x,y,x,y,x,y...)\n # print \"saving the file with delimiter '%s' \" % (sep)\n logging.debug(\"writing cycles to file\")\n with open(outname, \"w\", newline=\"\") as f:\n writer = csv.writer(f, delimiter=sep)\n writer.writerows(itertools.zip_longest(*out_data))\n # star (or asterix) means transpose (writing cols instead of rows)\n logging.info(f\"The file {outname} was created\")\n\n def _export_normal(self, data, setname=None, sep=None, outname=None):\n time_00 = time.time()\n lastname = \"_normal.csv\"\n if sep is None:\n sep = self.sep\n if outname is None:\n outname = setname + lastname\n txt = outname\n try:\n data.raw.to_csv(outname, sep=sep)\n txt += \" OK\"\n except Exception as e:\n txt += \" Could not save it!\"\n logging.debug(e)\n warnings.warn(f\"Unhandled exception raised: {e}\")\n logging.info(txt)\n logging.debug(f\"(dt: {(time.time() - time_00):4.2f}s)\")\n\n def _export_stats(self, data, setname=None, sep=None, outname=None):\n time_00 = time.time()\n lastname = \"_stats.csv\"\n if sep is None:\n sep = self.sep\n if outname is None:\n outname = setname + lastname\n txt = outname\n try:\n data.summary.to_csv(outname, sep=sep)\n txt += \" OK\"\n except Exception as e:\n txt += \" Could not save it!\"\n logging.debug(e)\n warnings.warn(f\"Unhandled exception raised: {e}\")\n logging.info(txt)\n logging.debug(f\"(dt: {(time.time() - time_00):4.2f}s)\")\n\n def _export_steptable(self, data, setname=None, sep=None, outname=None):\n time_00 = time.time()\n lastname = \"_steps.csv\"\n if sep is None:\n sep = self.sep\n if outname is None:\n outname = setname + lastname\n txt = outname\n try:\n data.steps.to_csv(outname, sep=sep)\n txt += \" OK\"\n except Exception as e:\n txt += \" Could not save it!\"\n logging.debug(e)\n warnings.warn(f\"Unhandled exception raised: {e}\")\n logging.info(txt)\n logging.debug(f\"(dt: {(time.time() - time_00):4.2f}s)\")\n\n def to_csv(\n self,\n datadir=None,\n sep=None,\n cycles=False,\n raw=True,\n summary=True,\n shifted=False,\n method=None,\n shift=0.0,\n last_cycle=None,\n ):\n \"\"\"Saves the data as .csv file(s).\n\n Args:\n datadir: folder where to save the data (uses current folder if not\n given).\n sep: the separator to use in the csv file\n (defaults to CellpyData.sep).\n cycles: (bool) export voltage-capacity curves if True.\n raw: (bool) export raw-data if True.\n summary: (bool) export summary if True.\n shifted (bool): export with cumulated shift.\n method (string): how the curves are given\n \"back-and-forth\" - standard back and forth; discharge\n (or charge) reversed from where charge (or\n discharge) ends.\n \"forth\" - discharge (or charge) continues along x-axis.\n \"forth-and-forth\" - discharge (or charge) also starts at 0 (or\n shift if not shift=0.0)\n shift: start-value for charge (or discharge)\n last_cycle: process only up to this cycle (if not None).\n\n Returns: Nothing\n\n \"\"\"\n\n if sep is None:\n sep = self.sep\n\n logging.debug(\"saving to csv\")\n\n dataset_number = -1\n for data in self.cells:\n dataset_number += 1\n if not self._is_not_empty_dataset(data):\n logging.info(\"to_csv 
-\")\n logging.info(\"empty test [%i]\" % dataset_number)\n logging.info(\"not saved!\")\n else:\n if isinstance(data.loaded_from, (list, tuple)):\n txt = \"merged file\"\n txt += \"using first file as basename\"\n logging.debug(txt)\n no_merged_sets = len(data.loaded_from)\n no_merged_sets = \"_merged_\" + str(no_merged_sets).zfill(3)\n filename = data.loaded_from[0]\n else:\n filename = data.loaded_from\n no_merged_sets = \"\"\n firstname, extension = os.path.splitext(filename)\n firstname += no_merged_sets\n if datadir:\n firstname = os.path.join(datadir, os.path.basename(firstname))\n\n if raw:\n outname_normal = firstname + \"_normal.csv\"\n self._export_normal(data, outname=outname_normal, sep=sep)\n if data.steps_made is True:\n outname_steps = firstname + \"_steps.csv\"\n self._export_steptable(data, outname=outname_steps, sep=sep)\n else:\n logging.debug(\"steps_made is not True\")\n\n if summary:\n outname_stats = firstname + \"_stats.csv\"\n self._export_stats(data, outname=outname_stats, sep=sep)\n\n if cycles:\n outname_cycles = firstname + \"_cycles.csv\"\n self._export_cycles(\n outname=outname_cycles,\n dataset_number=dataset_number,\n sep=sep,\n shifted=shifted,\n method=method,\n shift=shift,\n last_cycle=last_cycle,\n )\n\n def save(\n self,\n filename,\n dataset_number=None,\n force=False,\n overwrite=True,\n extension=\"h5\",\n ensure_step_table=None,\n ):\n \"\"\"Save the data structure to cellpy-format.\n\n Args:\n filename: (str or pathlib.Path) the name you want to give the file\n dataset_number: (int) if you have several datasets, chose the one\n you want (probably leave this untouched)\n force: (bool) save a file even if the summary is not made yet\n (not recommended)\n overwrite: (bool) save the new version of the file even if old one\n exists.\n extension: (str) filename extension.\n ensure_step_table: (bool) make step-table if missing.\n\n Returns: Nothing at all.\n \"\"\"\n logging.debug(f\"Trying to save cellpy-file to {filename}\")\n logging.info(f\" -> {filename}\")\n\n if ensure_step_table is None:\n ensure_step_table = self.ensure_step_table\n\n dataset_number = self._validate_dataset_number(dataset_number)\n if dataset_number is None:\n logging.info(\"Saving test failed!\")\n self._report_empty_dataset()\n return\n\n test = self.get_cell(dataset_number)\n summary_made = test.summary_made\n\n if not summary_made and not force:\n logging.info(\"You should not save datasets without making a summary first!\")\n logging.info(\"If you really want to do it, use save with force=True\")\n return\n\n step_table_made = test.steps_made\n if not step_table_made and not force and not ensure_step_table:\n logging.info(\n \"You should not save datasets without making a step-table first!\"\n )\n logging.info(\"If you really want to do it, use save with force=True\")\n return\n\n outfile_all = Path(filename)\n if not outfile_all.suffix:\n outfile_all = outfile_all.with_suffix(f\".{extension}\")\n\n if os.path.isfile(outfile_all):\n logging.debug(\"Outfile exists\")\n if overwrite:\n logging.debug(\"overwrite = True\")\n try:\n os.remove(outfile_all)\n except PermissionError as e:\n logging.critical(\"Could not over write old file\")\n logging.info(e)\n return\n else:\n logging.critical(\"Save (hdf5): file exist - did not save\", end=\" \")\n logging.info(outfile_all)\n return\n\n if ensure_step_table:\n logging.debug(\"ensure_step_table is on\")\n if not test.steps_made:\n logging.debug(\"save: creating step table\")\n 
self.make_step_table(dataset_number=dataset_number)\n\n # This method can probably be updated using pandas transpose trick\n logging.debug(\"trying to make infotable\")\n infotbl, fidtbl = self._create_infotable(dataset_number=dataset_number)\n\n root = prms._cellpyfile_root\n\n if CELLPY_FILE_VERSION > 4:\n raw_dir = prms._cellpyfile_raw\n step_dir = prms._cellpyfile_step\n summary_dir = prms._cellpyfile_summary\n meta_dir = \"/info\"\n fid_dir = prms._cellpyfile_fid\n\n else:\n raw_dir = \"/raw\"\n step_dir = \"/step_table\"\n summary_dir = \"/dfsummary\"\n meta_dir = \"/info\"\n fid_dir = \"/fidtable\"\n\n logging.debug(\"trying to save to hdf5\")\n txt = \"\\nHDF5 file: %s\" % outfile_all\n logging.debug(txt)\n\n warnings.simplefilter(\"ignore\", PerformanceWarning)\n try:\n with pickle_protocol(4):\n store = pd.HDFStore(\n outfile_all,\n complib=prms._cellpyfile_complib,\n complevel=prms._cellpyfile_complevel,\n )\n\n logging.debug(\"trying to put raw data\")\n\n logging.debug(\" - lets set Data_Point as index\")\n\n hdr_data_point = self.headers_normal.data_point_txt\n\n if test.raw.index.name != hdr_data_point:\n test.raw = test.raw.set_index(hdr_data_point, drop=False)\n\n store.put(root + raw_dir, test.raw, format=prms._cellpyfile_raw_format)\n logging.debug(\" raw -> hdf5 OK\")\n\n logging.debug(\"trying to put summary\")\n store.put(\n root + summary_dir,\n test.summary,\n format=prms._cellpyfile_summary_format,\n )\n logging.debug(\" summary -> hdf5 OK\")\n\n logging.debug(\"trying to put meta data\")\n store.put(\n root + meta_dir, infotbl, format=prms._cellpyfile_infotable_format\n )\n logging.debug(\" meta -> hdf5 OK\")\n\n logging.debug(\"trying to put fidtable\")\n store.put(\n root + fid_dir, fidtbl, format=prms._cellpyfile_fidtable_format\n )\n logging.debug(\" fid -> hdf5 OK\")\n\n logging.debug(\"trying to put step\")\n try:\n store.put(\n root + step_dir,\n test.steps,\n format=prms._cellpyfile_stepdata_format,\n )\n logging.debug(\" step -> hdf5 OK\")\n except TypeError:\n test = self._fix_dtype_step_table(test)\n store.put(\n root + step_dir,\n test.steps,\n format=prms._cellpyfile_stepdata_format,\n )\n logging.debug(\" fixed step -> hdf5 OK\")\n\n # creating indexes\n # hdr_data_point = self.headers_normal.data_point_txt\n # hdr_cycle_steptable = self.headers_step_table.cycle\n # hdr_cycle_normal = self.headers_normal.cycle_index_txt\n\n # store.create_table_index(root + \"/raw\", columns=[hdr_data_point],\n # optlevel=9, kind='full')\n finally:\n store.close()\n logging.debug(\" all -> hdf5 OK\")\n warnings.simplefilter(\"default\", PerformanceWarning)\n # del store\n\n # --------------helper-functions--------------------------------------------\n def _fix_dtype_step_table(self, dataset):\n hst = get_headers_step_table()\n try:\n cols = dataset.steps.columns\n except AttributeError:\n logging.info(\"Could not extract columns from steps\")\n return\n for col in cols:\n if col not in [hst.cycle, hst.sub_step, hst.info]:\n dataset.steps[col] = dataset.steps[col].apply(pd.to_numeric)\n else:\n dataset.steps[col] = dataset.steps[col].astype(\"str\")\n return dataset\n\n # TODO: check if this is useful and if it is rename, if not delete\n def _cap_mod_summary(self, summary, capacity_modifier=\"reset\"):\n # modifies the summary table\n time_00 = time.time()\n discharge_title = self.headers_normal.discharge_capacity_txt\n charge_title = self.headers_normal.charge_capacity_txt\n chargecap = 0.0\n dischargecap = 0.0\n\n # TODO: @jepe - use pd.loc[row,column]\n\n if 
capacity_modifier == \"reset\":\n\n for index, row in summary.iterrows():\n dischargecap_2 = row[discharge_title]\n summary.loc[index, discharge_title] = dischargecap_2 - dischargecap\n dischargecap = dischargecap_2\n chargecap_2 = row[charge_title]\n summary.loc[index, charge_title] = chargecap_2 - chargecap\n chargecap = chargecap_2\n else:\n raise NotImplementedError\n\n logging.debug(f\"(dt: {(time.time() - time_00):4.2f}s)\")\n return summary\n\n # TODO: check if this is useful and if it is rename, if not delete\n def _cap_mod_normal(\n self, dataset_number=None, capacity_modifier=\"reset\", allctypes=True\n ):\n # modifies the normal table\n time_00 = time.time()\n logging.debug(\"Not properly checked yet! Use with caution!\")\n dataset_number = self._validate_dataset_number(dataset_number)\n if dataset_number is None:\n self._report_empty_dataset()\n return\n cycle_index_header = self.headers_normal.cycle_index_txt\n step_index_header = self.headers_normal.step_index_txt\n discharge_index_header = self.headers_normal.discharge_capacity_txt\n discharge_energy_index_header = self.headers_normal.discharge_energy_txt\n charge_index_header = self.headers_normal.charge_capacity_txt\n charge_energy_index_header = self.headers_normal.charge_energy_txt\n\n raw = self.cells[dataset_number].raw\n\n chargecap = 0.0\n dischargecap = 0.0\n\n if capacity_modifier == \"reset\":\n # discharge cycles\n no_cycles = np.amax(raw[cycle_index_header])\n for j in range(1, no_cycles + 1):\n cap_type = \"discharge\"\n e_header = discharge_energy_index_header\n cap_header = discharge_index_header\n discharge_cycles = self.get_step_numbers(\n steptype=cap_type,\n allctypes=allctypes,\n cycle_number=j,\n dataset_number=dataset_number,\n )\n\n steps = discharge_cycles[j]\n txt = \"Cycle %i (discharge): \" % j\n logging.debug(txt)\n # TODO: @jepe - use pd.loc[row,column] e.g. 
pd.loc[:,\"charge_cap\"]\n # for col or pd.loc[(pd.[\"step\"]==1),\"x\"]\n selection = (raw[cycle_index_header] == j) & (\n raw[step_index_header].isin(steps)\n )\n c0 = raw[selection].iloc[0][cap_header]\n e0 = raw[selection].iloc[0][e_header]\n raw.loc[selection, cap_header] = raw.loc[selection, cap_header] - c0\n raw.loc[selection, e_header] = raw.loc[selection, e_header] - e0\n\n cap_type = \"charge\"\n e_header = charge_energy_index_header\n cap_header = charge_index_header\n charge_cycles = self.get_step_numbers(\n steptype=cap_type,\n allctypes=allctypes,\n cycle_number=j,\n dataset_number=dataset_number,\n )\n steps = charge_cycles[j]\n txt = \"Cycle %i (charge): \" % j\n logging.debug(txt)\n\n selection = (raw[cycle_index_header] == j) & (\n raw[step_index_header].isin(steps)\n )\n\n if any(selection):\n c0 = raw[selection].iloc[0][cap_header]\n e0 = raw[selection].iloc[0][e_header]\n raw.loc[selection, cap_header] = raw.loc[selection, cap_header] - c0\n raw.loc[selection, e_header] = raw.loc[selection, e_header] - e0\n logging.debug(f\"(dt: {(time.time() - time_00):4.2f}s)\")\n\n def get_number_of_tests(self):\n return self.number_of_datasets\n\n def get_mass(self, set_number=None):\n set_number = self._validate_dataset_number(set_number)\n if set_number is None:\n self._report_empty_dataset()\n return\n if not self.cells[set_number].mass_given:\n logging.info(\"No mass\")\n return self.cells[set_number].mass\n\n def get_cell(self, n=0):\n # TODO: remove me\n return self.cells[n]\n\n def sget_voltage(self, cycle, step, dataset_number=None):\n \"\"\"Returns voltage for cycle, step.\n\n Convenience function; same as issuing\n raw[(raw[cycle_index_header] == cycle) &\n (raw[step_index_header] == step)][voltage_header]\n\n Args:\n cycle: cycle number\n step: step number\n dataset_number: the dataset number (automatic selection if None)\n\n Returns:\n pandas.Series or None if empty\n \"\"\"\n header = self.headers_normal.voltage_txt\n return self._sget(\n cycle, step, header, usteps=False, dataset_number=dataset_number\n )\n\n def sget_current(self, cycle, step, dataset_number=None):\n \"\"\"Returns current for cycle, step.\n\n Convenience function; same as issuing\n raw[(raw[cycle_index_header] == cycle) &\n (raw[step_index_header] == step)][current_header]\n\n Args:\n cycle: cycle number\n step: step number\n dataset_number: the dataset number (automatic selection if None)\n\n Returns:\n pandas.Series or None if empty\n \"\"\"\n header = self.headers_normal.current_txt\n return self._sget(\n cycle, step, header, usteps=False, dataset_number=dataset_number\n )\n\n def get_voltage(self, cycle=None, dataset_number=None, full=True):\n \"\"\"Returns voltage (in V).\n\n Args:\n cycle: cycle number (all cycles if None)\n dataset_number: first dataset if None\n full: valid only for cycle=None (i.e. 
all cycles), returns the full\n pandas.Series if True, else a list of pandas.Series\n\n Returns:\n pandas.Series (or list of pandas.Series if cycle=None og full=False)\n \"\"\"\n\n dataset_number = self._validate_dataset_number(dataset_number)\n if dataset_number is None:\n self._report_empty_dataset()\n return\n cycle_index_header = self.headers_normal.cycle_index_txt\n voltage_header = self.headers_normal.voltage_txt\n # step_index_header = self.headers_normal.step_index_txt\n\n test = self.cells[dataset_number].raw\n if cycle:\n logging.debug(\"getting voltage curve for cycle\")\n c = test[(test[cycle_index_header] == cycle)]\n if not self.is_empty(c):\n v = c[voltage_header]\n return v\n else:\n if not full:\n logging.debug(\"getting list of voltage-curves for all cycles\")\n v = []\n no_cycles = np.amax(test[cycle_index_header])\n for j in range(1, no_cycles + 1):\n txt = \"Cycle %i: \" % j\n logging.debug(txt)\n c = test[(test[cycle_index_header] == j)]\n v.append(c[voltage_header])\n else:\n logging.debug(\"getting frame of all voltage-curves\")\n v = test[voltage_header]\n return v\n\n def get_current(self, cycle=None, dataset_number=None, full=True):\n \"\"\"Returns current (in mA).\n\n Args:\n cycle: cycle number (all cycles if None)\n dataset_number: first dataset if None\n full: valid only for cycle=None (i.e. all cycles), returns the full\n pandas.Series if True, else a list of pandas.Series\n\n Returns:\n pandas.Series (or list of pandas.Series if cycle=None og full=False)\n \"\"\"\n\n dataset_number = self._validate_dataset_number(dataset_number)\n if dataset_number is None:\n self._report_empty_dataset()\n return\n cycle_index_header = self.headers_normal.cycle_index_txt\n current_header = self.headers_normal.current_txt\n # step_index_header = self.headers_normal.step_index_txt\n\n test = self.cells[dataset_number].raw\n if cycle:\n logging.debug(f\"getting current for cycle {cycle}\")\n c = test[(test[cycle_index_header] == cycle)]\n if not self.is_empty(c):\n v = c[current_header]\n return v\n else:\n if not full:\n logging.debug(\"getting a list of current-curves for all cycles\")\n v = []\n no_cycles = np.amax(test[cycle_index_header])\n for j in range(1, no_cycles + 1):\n txt = \"Cycle %i: \" % j\n logging.debug(txt)\n c = test[(test[cycle_index_header] == j)]\n v.append(c[current_header])\n else:\n logging.debug(\"getting all current-curves \")\n v = test[current_header]\n return v\n\n def sget_steptime(self, cycle, step, dataset_number=None):\n \"\"\"Returns step time for cycle, step.\n\n Convenience function; same as issuing\n raw[(raw[cycle_index_header] == cycle) &\n (raw[step_index_header] == step)][step_time_header]\n\n Args:\n cycle: cycle number\n step: step number\n dataset_number: the dataset number (automatic selection if None)\n\n Returns:\n pandas.Series or None if empty\n \"\"\"\n\n header = self.headers_normal.step_time_txt\n return self._sget(\n cycle, step, header, usteps=False, dataset_number=dataset_number\n )\n\n def _sget(self, cycle, step, header, usteps=False, dataset_number=None):\n dataset_number = self._validate_dataset_number(dataset_number)\n logging.debug(f\"searching for {header}\")\n if dataset_number is None:\n self._report_empty_dataset()\n return\n\n cycle_index_header = self.headers_normal.cycle_index_txt\n step_index_header = self.headers_normal.step_index_txt\n\n if usteps:\n print(\"Using sget for usteps is not supported yet.\")\n print(\"I encourage you to work with the DataFrames directly instead.\")\n print(\" - look up the 
'ustep' in the steps DataFrame\")\n print(\" - get the start and end 'data_point'\")\n print(\" - look up the start and end 'data_point' in the raw DataFrame\")\n print(\"\")\n print(\n \"(Just remember to run make_step_table with the all_steps set to True before you do it)\"\n )\n return\n\n test = self.cells[dataset_number].raw\n\n if not isinstance(step, (list, tuple)):\n step = [step]\n\n return test.loc[\n (test[cycle_index_header] == cycle) & (test[step_index_header].isin(step)),\n header,\n ].reset_index(drop=True)\n\n def sget_timestamp(self, cycle, step, dataset_number=None):\n \"\"\"Returns timestamp for cycle, step.\n\n Convenience function; same as issuing\n raw[(raw[cycle_index_header] == cycle) &\n (raw[step_index_header] == step)][timestamp_header]\n\n Args:\n cycle: cycle number\n step: step number (can be a list of several step numbers)\n dataset_number: the dataset number (automatic selection if None)\n\n Returns:\n pandas.Series\n \"\"\"\n\n header = self.headers_normal.test_time_txt\n return self._sget(\n cycle, step, header, usteps=False, dataset_number=dataset_number\n )\n\n def sget_step_numbers(self, cycle, step, dataset_number=None):\n \"\"\"Returns step number for cycle, step.\n\n Convenience function; same as issuing\n raw[(raw[cycle_index_header] == cycle) &\n (raw[step_index_header] == step)][step_index_header]\n\n Args:\n cycle: cycle number\n step: step number (can be a list of several step numbers)\n dataset_number: the dataset number (automatic selection if None)\n\n Returns:\n pandas.Series\n \"\"\"\n\n header = self.headers_normal.step_index_txt\n return self._sget(\n cycle, step, header, usteps=False, dataset_number=dataset_number\n )\n\n def get_datetime(self, cycle=None, dataset_number=None, full=True):\n\n dataset_number = self._validate_dataset_number(dataset_number)\n if dataset_number is None:\n self._report_empty_dataset()\n return\n cycle_index_header = self.headers_normal.cycle_index_txt\n datetime_header = self.headers_normal.datetime_txt\n\n v = pd.Series()\n test = self.cells[dataset_number].raw\n if cycle:\n c = test[(test[cycle_index_header] == cycle)]\n if not self.is_empty(c):\n v = c[datetime_header]\n\n else:\n if not full:\n logging.debug(\"getting datetime for all cycles\")\n v = []\n cycles = self.get_cycle_numbers()\n for j in cycles:\n txt = \"Cycle %i: \" % j\n logging.debug(txt)\n c = test[(test[cycle_index_header] == j)]\n v.append(c[datetime_header])\n else:\n logging.debug(\"returning full datetime col\")\n v = test[datetime_header]\n return v\n\n def get_timestamp(\n self, cycle=None, dataset_number=None, in_minutes=False, full=True\n ):\n \"\"\"Returns timestamps (in sec or minutes (if in_minutes==True)).\n\n Args:\n cycle: cycle number (all if None)\n dataset_number: first dataset if None\n in_minutes: return values in minutes instead of seconds if True\n full: valid only for cycle=None (i.e. 
all cycles), returns the full\n pandas.Series if True, else a list of pandas.Series\n\n Returns:\n pandas.Series (or list of pandas.Series if cycle=None og full=False)\n \"\"\"\n\n dataset_number = self._validate_dataset_number(dataset_number)\n if dataset_number is None:\n self._report_empty_dataset()\n return\n cycle_index_header = self.headers_normal.cycle_index_txt\n timestamp_header = self.headers_normal.test_time_txt\n\n v = pd.Series()\n test = self.cells[dataset_number].raw\n if cycle:\n c = test[(test[cycle_index_header] == cycle)]\n if not self.is_empty(c):\n v = c[timestamp_header]\n\n else:\n if not full:\n logging.debug(\"getting timestapm for all cycles\")\n v = []\n cycles = self.get_cycle_numbers()\n for j in cycles:\n txt = \"Cycle %i: \" % j\n logging.debug(txt)\n c = test[(test[cycle_index_header] == j)]\n v.append(c[timestamp_header])\n else:\n logging.debug(\"returning full timestamp col\")\n v = test[timestamp_header]\n if in_minutes and v is not None:\n v /= 60.0\n if in_minutes and v is not None:\n v /= 60.0\n return v\n\n def get_dcap(self, cycle=None, dataset_number=None, converter=None, **kwargs):\n \"\"\"Returns discharge_capacity (in mAh/g), and voltage.\"\"\"\n\n # TODO - jepe: should return a DataFrame as default\n # but remark that we then have to update e.g. batch_helpers.py\n # TODO - jepe: change needed: should not use\n # dataset_number as parameter\n\n dataset_number = self._validate_dataset_number(dataset_number)\n if dataset_number is None:\n self._report_empty_dataset()\n return\n if converter is None:\n converter = self.get_converter_to_specific()\n\n dc, v = self._get_cap(\n cycle, dataset_number, \"discharge\", converter=converter, **kwargs\n )\n return dc, v\n\n def get_ccap(self, cycle=None, dataset_number=None, converter=None, **kwargs):\n \"\"\"Returns charge_capacity (in mAh/g), and voltage.\"\"\"\n\n # TODO - jepe: should return a DataFrame as default\n # but remark that we then have to update e.g. 
batch_helpers.py\n # TODO - jepe: change needed: should not use\n # dataset_number as parameter\n\n dataset_number = self._validate_dataset_number(dataset_number)\n if dataset_number is None:\n self._report_empty_dataset()\n return\n if converter is None:\n converter = self.get_converter_to_specific()\n cc, v = self._get_cap(\n cycle, dataset_number, \"charge\", converter=converter, **kwargs\n )\n return cc, v\n\n def get_cap(\n self,\n cycle=None,\n dataset_number=None,\n method=\"back-and-forth\",\n insert_nan=None,\n shift=0.0,\n categorical_column=False,\n label_cycle_number=False,\n split=False,\n interpolated=False,\n dx=0.1,\n number_of_points=None,\n ignore_errors=True,\n dynamic=False,\n inter_cycle_shift=True,\n **kwargs,\n ):\n \"\"\"Gets the capacity for the run.\n\n Args:\n cycle (int): cycle number.\n method (string): how the curves are given\n \"back-and-forth\" - standard back and forth; discharge\n (or charge) reversed from where charge (or discharge) ends.\n \"forth\" - discharge (or charge) continues along x-axis.\n \"forth-and-forth\" - discharge (or charge) also starts at 0\n (or shift if not shift=0.0)\n insert_nan (bool): insert a np.nan between the charge and discharge curves.\n Defaults to True for \"forth-and-forth\", else False\n shift: start-value for charge (or discharge) (typically used when\n plotting shifted-capacity).\n categorical_column: add a categorical column showing if it is\n charge or discharge.\n dataset_number (int): test number (default first)\n (usually not used).\n label_cycle_number (bool): add column for cycle number\n (tidy format).\n split (bool): return a list of c and v instead of the default\n that is to return them combined in a DataFrame. This is only\n possible for some specific combinations of options (neither\n categorical_column=True or label_cycle_number=True are\n allowed).\n interpolated (bool): set to True if you would like to get\n interpolated data (typically if you want to save disk space\n or memory). Defaults to False.\n dx (float): the step used when interpolating.\n number_of_points (int): number of points to use (over-rides dx)\n for interpolation (i.e. the length of the interpolated data).\n ignore_errors (bool): don't break out of loop if an error occurs.\n dynamic: for dynamic retrieving data from cellpy-file.\n [NOT IMPLEMENTED YET]\n inter_cycle_shift (bool): cumulative shifts between consecutive\n cycles. Defaults to True.\n\n Returns:\n pandas.DataFrame ((cycle) voltage, capacity, (direction (-1, 1)))\n unless split is explicitly set to True. 
Then it returns a tuple\n with capacity (mAh/g) and voltage.\n \"\"\"\n\n # TODO: allow for fixing the interpolation range (so that it is possible\n # to run the function on several cells and have a common x-axis\n\n dataset_number = self._validate_dataset_number(dataset_number)\n if dataset_number is None:\n self._report_empty_dataset()\n return\n\n # if cycle is not given, then this function should\n # iterate through cycles\n if cycle is None:\n cycle = self.get_cycle_numbers()\n\n if not isinstance(cycle, collections.abc.Iterable):\n cycle = [cycle]\n\n if split and not (categorical_column or label_cycle_number):\n return_dataframe = False\n else:\n return_dataframe = True\n\n method = method.lower()\n if method not in [\"back-and-forth\", \"forth\", \"forth-and-forth\"]:\n warnings.warn(\n f\"method '{method}' is not a valid option \"\n f\"- setting to 'back-and-forth'\"\n )\n method = \"back-and-forth\"\n\n if insert_nan is None:\n if method == \"forth-and-forth\":\n insert_nan = True\n else:\n insert_nan = False\n\n capacity = None\n voltage = None\n specific_converter = self.get_converter_to_specific()\n cycle_df = pd.DataFrame()\n\n initial = True\n for current_cycle in cycle:\n error = False\n try:\n cc, cv = self.get_ccap(\n current_cycle,\n dataset_number,\n converter=specific_converter,\n **kwargs,\n )\n dc, dv = self.get_dcap(\n current_cycle,\n dataset_number,\n converter=specific_converter,\n **kwargs,\n )\n\n except NullData as e:\n error = True\n logging.debug(e)\n if not ignore_errors:\n logging.debug(\"breaking out of loop\")\n break\n if not error:\n if cc.empty:\n logging.debug(\"get_ccap returns empty cc Series\")\n\n if dc.empty:\n logging.debug(\"get_ccap returns empty dc Series\")\n\n if initial:\n prev_end = shift\n initial = False\n if self.cycle_mode == \"anode\":\n first_interpolation_direction = -1\n _first_step_c = dc\n _first_step_v = dv\n last_interpolation_direction = 1\n _last_step_c = cc\n _last_step_v = cv\n else:\n first_interpolation_direction = 1\n _first_step_c = cc\n _first_step_v = cv\n last_interpolation_direction = -1\n _last_step_c = dc\n _last_step_v = dv\n\n if method == \"back-and-forth\":\n # _last = np.amax(_first_step_c)\n _last = _first_step_c.iat[-1]\n # should change amax to last point\n _first = None\n _new_first = None\n if not inter_cycle_shift:\n prev_end = 0.0\n if _last_step_c is not None:\n _last_step_c = _last - _last_step_c + prev_end\n else:\n logging.debug(\"no last charge step found\")\n if _first_step_c is not None:\n _first = _first_step_c.iat[0]\n _first_step_c += prev_end\n _new_first = _first_step_c.iat[0]\n else:\n logging.debug(\"probably empty (_first_step_c is None)\")\n # logging.debug(f\"current shifts used: prev_end = {prev_end}\")\n # logging.debug(f\"shifting start from {_first} to \"\n # f\"{_new_first}\")\n\n # prev_end = np.amin(_last_step_c)\n prev_end = _last_step_c.iat[-1]\n elif method == \"forth\":\n # _last = np.amax(_first_step_c)\n _last = _first_step_c.iat[-1]\n if _last_step_c is not None:\n _last_step_c += _last + prev_end\n else:\n logging.debug(\"no last charge step found\")\n if _first_step_c is not None:\n _first_step_c += prev_end\n else:\n logging.debug(\"no first charge step found\")\n\n # prev_end = np.amax(_last_step_c)\n prev_end = _last_step_c.iat[-1]\n\n elif method == \"forth-and-forth\":\n if _last_step_c is not None:\n _last_step_c += shift\n else:\n logging.debug(\"no last charge step found\")\n if _first_step_c is not None:\n _first_step_c += shift\n else:\n logging.debug(\"no 
first charge step found\")\n\n if return_dataframe:\n\n try:\n _first_df = pd.DataFrame(\n {\"voltage\": _first_step_v, \"capacity\": _first_step_c,}\n )\n if interpolated:\n _first_df = interpolate_y_on_x(\n _first_df,\n y=\"capacity\",\n x=\"voltage\",\n dx=dx,\n number_of_points=number_of_points,\n direction=first_interpolation_direction,\n )\n if insert_nan:\n _nan = pd.DataFrame(\n {\"capacity\": [np.nan], \"voltage\": [np.nan]}\n )\n _first_df = _first_df.append(_nan)\n if categorical_column:\n _first_df[\"direction\"] = -1\n\n _last_df = pd.DataFrame(\n {\n \"voltage\": _last_step_v.values,\n \"capacity\": _last_step_c.values,\n }\n )\n if interpolated:\n _last_df = interpolate_y_on_x(\n _last_df,\n y=\"capacity\",\n x=\"voltage\",\n dx=dx,\n number_of_points=number_of_points,\n direction=last_interpolation_direction,\n )\n if insert_nan:\n _last_df = _last_df.append(_nan)\n if categorical_column:\n _last_df[\"direction\"] = 1\n\n except AttributeError:\n logging.info(f\"Could not extract cycle {current_cycle}\")\n else:\n c = pd.concat([_first_df, _last_df], axis=0)\n if label_cycle_number:\n c.insert(0, \"cycle\", current_cycle)\n # c[\"cycle\"] = current_cycle\n # c = c[[\"cycle\", \"voltage\", \"capacity\", \"direction\"]]\n if cycle_df.empty:\n cycle_df = c\n else:\n cycle_df = pd.concat([cycle_df, c], axis=0)\n\n else:\n logging.warning(\"returning non-dataframe\")\n c = pd.concat([_first_step_c, _last_step_c], axis=0)\n v = pd.concat([_first_step_v, _last_step_v], axis=0)\n\n capacity = pd.concat([capacity, c], axis=0)\n voltage = pd.concat([voltage, v], axis=0)\n\n if return_dataframe:\n return cycle_df\n else:\n return capacity, voltage\n\n def _get_cap(\n self,\n cycle=None,\n dataset_number=None,\n cap_type=\"charge\",\n trim_taper_steps=None,\n steps_to_skip=None,\n steptable=None,\n converter=None,\n ):\n # used when extracting capacities (get_ccap, get_dcap)\n # TODO: @jepe - does not allow for constant voltage yet?\n dataset_number = self._validate_dataset_number(dataset_number)\n if dataset_number is None:\n self._report_empty_dataset()\n return\n test = self.cells[\n dataset_number\n ] # not used anymore - will be removed when we skip several cells option\n\n if cap_type == \"charge_capacity\":\n cap_type = \"charge\"\n elif cap_type == \"discharge_capacity\":\n cap_type = \"discharge\"\n\n cycles = self.get_step_numbers(\n steptype=cap_type,\n allctypes=False,\n cycle_number=cycle,\n dataset_number=dataset_number,\n trim_taper_steps=trim_taper_steps,\n steps_to_skip=steps_to_skip,\n steptable=steptable,\n )\n\n if cap_type == \"charge\":\n column_txt = self.headers_normal.charge_capacity_txt\n else:\n column_txt = self.headers_normal.discharge_capacity_txt\n if cycle:\n steps = cycles[cycle]\n _v = []\n _c = []\n\n for step in sorted(steps):\n selected_step = self._select_step(cycle, step, dataset_number)\n if not self.is_empty(selected_step):\n _v.append(selected_step[self.headers_normal.voltage_txt])\n _c.append(selected_step[column_txt] * converter)\n try:\n voltage = pd.concat(_v, axis=0)\n cap = pd.concat(_c, axis=0)\n except:\n logging.debug(\"could not find any steps for this cycle\")\n raise NullData(f\"no steps found (c:{cycle} s:{step} type:{cap_type})\")\n else:\n # get all the discharge cycles\n # this is a dataframe filtered on step and cycle\n # This functionality is not crucial since get_cap (that uses this method) has it\n # (but it might be nice to improve performance)\n raise NotImplementedError(\n \"Not yet possible to extract without giving 
cycle numbers (use get_cap instead)\"\n )\n\n return cap, voltage\n\n def get_ocv(\n self,\n cycles=None,\n direction=\"up\",\n remove_first=False,\n interpolated=False,\n dx=None,\n number_of_points=None,\n ):\n\n \"\"\"get the open circuit voltage relaxation curves.\n\n Args:\n cycles (list of ints or None): the cycles to extract from\n (selects all if not given).\n direction (\"up\", \"down\", or \"both\"): extract only relaxations that\n is performed during discharge for \"up\" (because then the\n voltage relaxes upwards) etc.\n remove_first: remove the first relaxation curve (typically,\n the first curve is from the initial rest period between\n assembling the cell to the actual testing/cycling starts)\n interpolated (bool): set to True if you want the data to be\n interpolated (e.g. for creating smaller files)\n dx (float): the step used when interpolating.\n number_of_points (int): number of points to use (over-rides dx)\n for interpolation (i.e. the length of the interpolated data).\n\n Returns:\n A pandas.DataFrame with cycle-number, step-number, step-time, and\n voltage columns.\n \"\"\"\n\n if cycles is None:\n cycles = self.get_cycle_numbers()\n else:\n if not isinstance(cycles, (list, tuple, np.ndarray)):\n cycles = [cycles]\n else:\n remove_first = False\n\n ocv_rlx_id = \"ocvrlx\"\n if direction == \"up\":\n ocv_rlx_id += \"_up\"\n elif direction == \"down\":\n ocv_rlx_id += \"_down\"\n\n steps = self.cell.steps\n raw = self.cell.raw\n\n ocv_steps = steps.loc[steps[\"cycle\"].isin(cycles), :]\n\n ocv_steps = ocv_steps.loc[\n ocv_steps.type.str.startswith(ocv_rlx_id, na=False), :\n ]\n\n if remove_first:\n ocv_steps = ocv_steps.iloc[1:, :]\n\n step_time_label = self.headers_normal.step_time_txt\n voltage_label = self.headers_normal.voltage_txt\n cycle_label = self.headers_normal.cycle_index_txt\n step_label = self.headers_normal.step_index_txt\n\n selected_df = raw.where(\n raw[cycle_label].isin(ocv_steps.cycle)\n & raw[step_label].isin(ocv_steps.step)\n ).dropna()\n\n selected_df = selected_df.loc[\n :, [cycle_label, step_label, step_time_label, voltage_label]\n ]\n\n if interpolated:\n if dx is None and number_of_points is None:\n dx = prms.Reader.time_interpolation_step\n new_dfs = list()\n groupby_list = [cycle_label, step_label]\n\n for name, group in selected_df.groupby(groupby_list):\n new_group = interpolate_y_on_x(\n group,\n x=step_time_label,\n y=voltage_label,\n dx=dx,\n number_of_points=number_of_points,\n )\n\n for i, j in zip(groupby_list, name):\n new_group[i] = j\n new_dfs.append(new_group)\n\n selected_df = pd.concat(new_dfs)\n\n return selected_df\n\n def get_number_of_cycles(self, dataset_number=None, steptable=None):\n \"\"\"Get the number of cycles in the test.\"\"\"\n if steptable is None:\n dataset_number = self._validate_dataset_number(dataset_number)\n if dataset_number is None:\n self._report_empty_dataset()\n return\n d = self.cells[dataset_number].raw\n no_cycles = np.amax(d[self.headers_normal.cycle_index_txt])\n else:\n no_cycles = np.amax(steptable[self.headers_step_table.cycle])\n return no_cycles\n\n def get_cycle_numbers_old(self, dataset_number=None, steptable=None):\n \"\"\"Get a list containing all the cycle numbers in the test.\"\"\"\n logging.debug(\"getting cycle numbers\")\n if steptable is None:\n dataset_number = self._validate_dataset_number(dataset_number)\n if dataset_number is None:\n self._report_empty_dataset()\n return\n d = self.cells[dataset_number].raw\n cycles = d[self.headers_normal.cycle_index_txt].dropna().unique()\n 
else:\n logging.debug(\"steptable is not none\")\n cycles = steptable[self.headers_step_table.cycle].dropna().unique()\n logging.debug(f\"got {len(cycles)} cycle numbers\")\n return cycles\n\n def get_cycle_numbers(\n self,\n dataset_number=None,\n steptable=None,\n rate=None,\n rate_on=None,\n rate_std=None,\n rate_column=None,\n inverse=False,\n ):\n \"\"\"Get a list containing all the cycle numbers in the test.\n\n Parameters:\n rate (float): the rate to filter on. Remark that it should be given\n as a float, i.e. you will have to convert from C-rate to\n the actual numeric value. For example, use rate=0.05 if you want\n to filter on cycles that has a C/20 rate.\n rate_on (str): only select cycles if based on the rate of this step-type (e.g. on=\"charge\").\n rate_std (float): allow for this inaccuracy in C-rate when selecting cycles\n rate_column (str): column header name of the rate column,\n inverse (bool): select steps that does not have the given C-rate.\n\n Returns:\n numpy.ndarray of cycle numbers.\n \"\"\"\n\n logging.debug(\"getting cycle numbers\")\n if steptable is None:\n dataset_number = self._validate_dataset_number(dataset_number)\n if dataset_number is None:\n self._report_empty_dataset()\n return\n d = self.cells[dataset_number].raw\n cycles = d[self.headers_normal.cycle_index_txt].dropna().unique()\n steptable = self.cells[dataset_number].steps\n else:\n logging.debug(\"steptable is given as input parameter\")\n cycles = steptable[self.headers_step_table.cycle].dropna().unique()\n\n if rate is None:\n return cycles\n\n logging.debug(\"filtering on rate\")\n if rate_on is None:\n rate_on = [\"charge\"]\n else:\n if not isinstance(rate_on, (list, tuple)):\n rate_on = [rate_on]\n\n if rate_column is None:\n rate_column = self.headers_step_table[\"rate_avr\"]\n\n if rate_on:\n on_column = self.headers_step_table[\"type\"]\n\n if rate is None:\n rate = 0.05\n\n if rate_std is None:\n rate_std = 0.1 * rate\n\n if rate_on:\n cycles_mask = (\n (steptable[rate_column] < (rate + rate_std))\n & (steptable[rate_column] > (rate - rate_std))\n & (steptable[on_column].isin(rate_on))\n )\n else:\n cycles_mask = (steptable[rate_column] < (rate + rate_std)) & (\n steptable[rate_column] > (rate - rate_std)\n )\n\n if inverse:\n cycles_mask = ~cycles_mask\n\n filtered_step_table = steptable[cycles_mask]\n filtered_cycles = filtered_step_table[self.headers_step_table[\"cycle\"]].unique()\n\n return filtered_cycles\n\n def get_ir(self, dataset_number=None):\n \"\"\"Get the IR data (Deprecated).\"\"\"\n raise DeprecatedFeature\n\n def get_converter_to_specific(\n self, dataset=None, mass=None, to_unit=None, from_unit=None\n ):\n \"\"\"get the conversion values\n\n Args:\n dataset: DataSet object\n mass: mass of electrode (for example active material in mg)\n to_unit: (float) unit of input, f.ex. if unit of charge\n is mAh and unit of mass is g, then to_unit for charge/mass\n will be 0.001 / 1.0 = 0.001\n from_unit: float) unit of output, f.ex. 
if unit of charge\n is mAh and unit of mass is g, then to_unit for charge/mass\n will be 1.0 / 0.001 = 1000.0\n\n Returns:\n multiplier (float) from_unit/to_unit * mass\n\n \"\"\"\n\n if not dataset:\n dataset_number = self._validate_dataset_number(None)\n if dataset_number is None:\n self._report_empty_dataset()\n return\n dataset = self.cells[dataset_number]\n\n if not mass:\n mass = dataset.mass\n\n if not to_unit:\n to_unit_cap = self.cellpy_units[\"charge\"]\n to_unit_mass = self.cellpy_units[\"specific\"]\n to_unit = to_unit_cap / to_unit_mass\n if not from_unit:\n from_unit_cap = self.raw_units[\"charge\"]\n from_unit_mass = self.raw_units[\"mass\"]\n from_unit = from_unit_cap / from_unit_mass\n logging.debug(f\"from-unit: {from_unit}\")\n logging.debug(f\"to-unit: {to_unit}\")\n logging.debug(f\"mass: {mass}\")\n conversion_factor = from_unit / to_unit / mass\n logging.debug(f\"conversion factor: {conversion_factor}\")\n\n return conversion_factor\n\n def get_diagnostics_plots(self, dataset_number=None, scaled=False):\n raise DeprecatedFeature(\n \"This feature is deprecated. \"\n \"Extract diagnostics from the summary instead.\"\n )\n\n def _set_mass(self, dataset_number, value):\n try:\n self.cells[dataset_number].mass = value\n self.cells[dataset_number].mass_given = True\n except AttributeError as e:\n logging.info(\"This test is empty\")\n logging.info(e)\n\n def _set_tot_mass(self, dataset_number, value):\n try:\n self.cells[dataset_number].tot_mass = value\n except AttributeError as e:\n logging.info(\"This test is empty\")\n logging.info(e)\n\n def _set_nom_cap(self, dataset_number, value):\n try:\n self.cells[dataset_number].nom_cap = value\n except AttributeError as e:\n logging.info(\"This test is empty\")\n logging.info(e)\n\n def _set_run_attribute(self, attr, vals, dataset_number=None, validated=None):\n # Sets the val (vals) for the test (datasets).\n # Remark! This is left-over code from old ages when we thought we needed\n # to have data-sets with multiple cells. And before we learned about\n # setters and getters in Python. 
Feel free to refactor it.\n\n if attr == \"mass\":\n setter = self._set_mass\n elif attr == \"tot_mass\":\n setter = self._set_tot_mass\n elif attr == \"nom_cap\":\n setter = self._set_nom_cap\n\n number_of_tests = len(self.cells)\n if not number_of_tests:\n logging.info(\"No datasets have been loaded yet\")\n logging.info(f\"Cannot set {attr} before loading datasets\")\n sys.exit(-1)\n\n if not dataset_number:\n dataset_number = list(range(len(self.cells)))\n\n if not self._is_listtype(dataset_number):\n dataset_number = [dataset_number]\n\n if not self._is_listtype(vals):\n vals = [vals]\n if validated is None:\n for t, m in zip(dataset_number, vals):\n setter(t, m)\n else:\n for t, m, v in zip(dataset_number, vals, validated):\n if v:\n setter(t, m)\n else:\n logging.debug(\"_set_run_attribute: this set is empty\")\n\n def set_mass(self, masses, dataset_number=None, validated=None):\n \"\"\"Sets the mass (masses) for the test (datasets).\n \"\"\"\n self._set_run_attribute(\n \"mass\", masses, dataset_number=dataset_number, validated=validated\n )\n\n def set_tot_mass(self, masses, dataset_number=None, validated=None):\n \"\"\"Sets the mass (masses) for the test (datasets).\n \"\"\"\n self._set_run_attribute(\n \"tot_mass\", masses, dataset_number=dataset_number, validated=validated\n )\n\n def set_nom_cap(self, nom_caps, dataset_number=None, validated=None):\n \"\"\"Sets the mass (masses) for the test (datasets).\n \"\"\"\n self._set_run_attribute(\n \"nom_cap\", nom_caps, dataset_number=dataset_number, validated=validated\n )\n\n @staticmethod\n def set_col_first(df, col_names):\n \"\"\"set selected columns first in a pandas.DataFrame.\n\n This function sets cols with names given in col_names (a list) first in\n the DataFrame. The last col in col_name will come first (processed last)\n \"\"\"\n\n column_headings = df.columns\n column_headings = column_headings.tolist()\n try:\n for col_name in col_names:\n i = column_headings.index(col_name)\n column_headings.pop(column_headings.index(col_name))\n column_headings.insert(0, col_name)\n\n finally:\n df = df.reindex(columns=column_headings)\n return df\n\n def set_dataset_number_force(self, dataset_number=0):\n \"\"\"Force to set testnumber.\n\n Sets the DataSet number default (all functions with prm dataset_number\n will then be run assuming the default set dataset_number)\n \"\"\"\n self.selected_cell_number = dataset_number\n\n def set_cellnumber(self, dataset_number):\n \"\"\"Set the cell number.\n\n Set the cell number that will be used\n (CellpyData.selected_dataset_number).\n The class can save several datasets (but its not a frequently used\n feature), the datasets are stored in a list and dataset_number is the\n selected index in the list.\n\n Several options are available:\n n - int in range 0..(len-1) (python uses offset as index, i.e.\n starts with 0)\n last, end, newest - last (index set to -1)\n first, zero, beginning, default - first (index set to 0)\n \"\"\"\n warnings.warn(\"Deprecated\", DeprecationWarning)\n logging.debug(\"***set_testnumber(n)\")\n if not isinstance(dataset_number, int):\n dataset_number_txt = dataset_number\n try:\n if dataset_number_txt.lower() in [\"last\", \"end\", \"newest\"]:\n dataset_number = -1\n elif dataset_number_txt.lower() in [\n \"first\",\n \"zero\",\n \"beginning\",\n \"default\",\n ]:\n dataset_number = 0\n except Exception as e:\n logging.debug(\"assuming numeric\")\n warnings.warn(f\"Unhandled exception raised: {e}\")\n\n number_of_tests = len(self.cells)\n if dataset_number >= 
number_of_tests:\n dataset_number = -1\n logging.debug(\"you dont have that many datasets, setting to last test\")\n elif dataset_number < -1:\n logging.debug(\"not a valid option, setting to first test\")\n dataset_number = 0\n self.selected_cell_number = dataset_number\n\n # TODO: deprecate this\n def get_summary(self, dataset_number=None, use_summary_made=False):\n \"\"\"Retrieve summary returned as a pandas DataFrame.\"\"\"\n dataset_number = self._validate_dataset_number(dataset_number)\n if dataset_number is None:\n self._report_empty_dataset()\n return None\n\n test = self.get_cell(dataset_number)\n\n # This is a bit convoluted; in the old days, we used an attribute\n # called summary_made,\n # that was set to True when the summary was made successfully.\n # It is most likely never\n # used anymore. And will most probably be deleted.\n if use_summary_made:\n summary_made = test.summary_made\n else:\n summary_made = True\n\n if not summary_made:\n warnings.warn(\"Summary is not made yet\")\n return None\n else:\n logging.info(\"Returning datasets[test_no].summary\")\n return test.summary\n\n # -----------internal-helpers-----------------------------------------------\n\n # TODO: clean it up a bit\n @staticmethod\n def is_empty(v):\n try:\n if not v:\n return True\n else:\n return False\n except Exception:\n try:\n if v.empty:\n return True\n else:\n return False\n except Exception:\n if v.isnull:\n return False\n else:\n return True\n\n @staticmethod\n def _is_listtype(x):\n if isinstance(x, (list, tuple)):\n return True\n else:\n return False\n\n @staticmethod\n def _check_file_type(filename):\n warnings.warn(DeprecationWarning(\"this method will be removed \" \"in v.0.4.0\"))\n extension = os.path.splitext(filename)[-1]\n filetype = \"res\"\n if extension.lower() == \".res\":\n filetype = \"res\"\n elif extension.lower() == \".h5\":\n filetype = \"h5\"\n return filetype\n\n @staticmethod\n def _bounds(x):\n return np.amin(x), np.amax(x)\n\n @staticmethod\n def _roundup(x):\n n = 1000.0\n x = np.ceil(x * n)\n x /= n\n return x\n\n def _rounddown(self, x):\n x = self._roundup(-x)\n x = -x\n return x\n\n @staticmethod\n def _reverse(x):\n x = x[::-1]\n # x = x.sort_index(ascending=True)\n return x\n\n def _select_y(self, x, y, points):\n # uses interpolation to select y = f(x)\n min_x, max_x = self._bounds(x)\n if x[0] > x[-1]:\n # need to reverse\n x = self._reverse(x)\n y = self._reverse(y)\n f = interpolate.interp1d(y, x)\n y_new = f(points)\n return y_new\n\n def _select_last(self, raw):\n # this function gives a set of indexes pointing to the last\n # datapoints for each cycle in the dataset\n\n c_txt = self.headers_normal.cycle_index_txt\n d_txt = self.headers_normal.data_point_txt\n steps = []\n unique_steps = raw[c_txt].unique()\n max_step = max(raw[c_txt])\n for j in range(int(max_step)):\n if j + 1 not in unique_steps:\n logging.debug(f\"Warning: Cycle {j + 1} is missing!\")\n else:\n last_item = max(raw.loc[raw[c_txt] == j + 1, d_txt])\n steps.append(last_item)\n\n last_items = raw[d_txt].isin(steps)\n return last_items\n\n # TODO: find out what this is for and probably delete it\n def _modify_cycle_number_using_cycle_step(\n self, from_tuple=None, to_cycle=44, dataset_number=None\n ):\n # modify step-cycle tuple to new step-cycle tuple\n # from_tuple = [old cycle_number, old step_number]\n # to_cycle = new cycle_number\n\n if from_tuple is None:\n from_tuple = [1, 4]\n logging.debug(\"**- _modify_cycle_step\")\n dataset_number = 
self._validate_dataset_number(dataset_number)\n if dataset_number is None:\n self._report_empty_dataset()\n return\n\n cycle_index_header = self.headers_normal.cycle_index_txt\n step_index_header = self.headers_normal.step_index_txt\n\n step_table_txt_cycle = self.headers_step_table.cycle\n step_table_txt_step = self.headers_step_table.step\n\n # modifying steps\n st = self.cells[dataset_number].steps\n st[step_table_txt_cycle][\n (st[step_table_txt_cycle] == from_tuple[0])\n & (st[step_table_txt_step] == from_tuple[1])\n ] = to_cycle\n # modifying normal_table\n nt = self.cells[dataset_number].raw\n nt[cycle_index_header][\n (nt[cycle_index_header] == from_tuple[0])\n & (nt[step_index_header] == from_tuple[1])\n ] = to_cycle\n # modifying summary_table\n # not implemented yet\n\n # ----------making-summary------------------------------------------------------\n def make_summary(\n self,\n find_ocv=False,\n find_ir=False,\n find_end_voltage=True,\n use_cellpy_stat_file=None,\n all_tests=True,\n dataset_number=0,\n ensure_step_table=True,\n add_normalized_cycle_index=True,\n add_c_rate=True,\n normalization_cycles=None,\n nom_cap=None,\n from_cycle=None,\n ):\n \"\"\"Convenience function that makes a summary of the cycling data.\"\"\"\n\n # TODO: @jepe - include option for omitting steps\n # TODO: @jepe - make it is possible to update only new data by implementing\n # from_cycle (only calculate summary from a given cycle number).\n # Probably best to keep the old summary and make\n # a new one for the rest, then use pandas.concat to merge them.\n # Might have to create the culumative cols etc after merging?\n\n # first - check if we need some \"instrument-specific\" prms\n dataset_number = self._validate_dataset_number(dataset_number)\n if dataset_number is None:\n self._report_empty_dataset()\n return\n\n if ensure_step_table is None:\n ensure_step_table = self.ensure_step_table\n\n if use_cellpy_stat_file is None:\n use_cellpy_stat_file = prms.Reader.use_cellpy_stat_file\n logging.debug(\"using use_cellpy_stat_file from prms\")\n logging.debug(f\"use_cellpy_stat_file: {use_cellpy_stat_file}\")\n\n if all_tests is True:\n for j in range(len(self.cells)):\n txt = \"creating summary for file \"\n test = self.cells[j]\n if not self._is_not_empty_dataset(test):\n logging.info(f\"Empty test {j})\")\n return\n if isinstance(test.loaded_from, (list, tuple)):\n for f in test.loaded_from:\n txt += f\"{f}\\n\"\n else:\n txt += str(test.loaded_from)\n\n if not test.mass_given:\n txt += f\" mass for test {j} is not given\"\n txt += f\" setting it to {test.mass} mg\"\n logging.debug(txt)\n\n self._make_summary(\n j,\n find_ocv=find_ocv,\n find_ir=find_ir,\n find_end_voltage=find_end_voltage,\n use_cellpy_stat_file=use_cellpy_stat_file,\n ensure_step_table=ensure_step_table,\n add_normalized_cycle_index=add_normalized_cycle_index,\n add_c_rate=add_c_rate,\n normalization_cycles=normalization_cycles,\n nom_cap=nom_cap,\n )\n else:\n logging.debug(\"creating summary for only one test\")\n dataset_number = self._validate_dataset_number(dataset_number)\n if dataset_number is None:\n self._report_empty_dataset()\n return\n self._make_summary(\n dataset_number,\n find_ocv=find_ocv,\n find_ir=find_ir,\n find_end_voltage=find_end_voltage,\n use_cellpy_stat_file=use_cellpy_stat_file,\n ensure_step_table=ensure_step_table,\n add_normalized_cycle_index=add_normalized_cycle_index,\n add_c_rate=add_c_rate,\n normalization_cycles=normalization_cycles,\n nom_cap=nom_cap,\n )\n return self\n\n def _make_summary(\n 
self,\n dataset_number=None,\n mass=None,\n update_it=False,\n select_columns=True,\n find_ocv=False,\n find_ir=False,\n find_end_voltage=False,\n ensure_step_table=True,\n # TODO: @jepe - include option for omitting steps\n sort_my_columns=True,\n use_cellpy_stat_file=False,\n add_normalized_cycle_index=True,\n add_c_rate=False,\n normalization_cycles=None,\n nom_cap=None,\n # capacity_modifier = None,\n # test=None\n ):\n cycle_index_as_index = True\n\n time_00 = time.time()\n\n dataset_number = self._validate_dataset_number(dataset_number)\n\n logging.debug(\"start making summary\")\n if dataset_number is None:\n self._report_empty_dataset()\n return\n dataset = self.cells[dataset_number]\n # if test.merged == True:\n # use_cellpy_stat_file=False\n\n if not mass:\n mass = dataset.mass or 1.0\n else:\n if update_it:\n dataset.mass = mass\n\n if ensure_step_table and not self.load_only_summary:\n logging.debug(\"ensuring existence of step-table\")\n if not dataset.steps_made:\n logging.debug(\"dataset.step_table_made is not True\")\n logging.info(\"running make_step_table\")\n if nom_cap is not None:\n dataset.nom_cap = nom_cap\n self.make_step_table(dataset_number=dataset_number)\n\n # Retrieve the converters etc.\n specific_converter = self.get_converter_to_specific(dataset=dataset, mass=mass)\n\n hdr_normal = self.headers_normal\n dt_txt = hdr_normal.datetime_txt\n tt_txt = hdr_normal.test_time_txt\n st_txt = hdr_normal.step_time_txt\n c_txt = hdr_normal.cycle_index_txt\n d_txt = hdr_normal.data_point_txt\n s_txt = hdr_normal.step_index_txt\n voltage_header = hdr_normal.voltage_txt\n charge_txt = hdr_normal.charge_capacity_txt\n discharge_txt = hdr_normal.discharge_capacity_txt\n ir_txt = hdr_normal.internal_resistance_txt\n test_id_txt = hdr_normal.test_id_txt\n i_txt = hdr_normal.current_txt\n\n hdr_summary = self.headers_summary\n discharge_title = hdr_summary.discharge_capacity\n charge_title = hdr_summary.charge_capacity\n cumcharge_title = hdr_summary.cumulated_charge_capacity\n cumdischarge_title = hdr_summary.cumulated_discharge_capacity\n coulomb_title = hdr_summary.coulombic_efficiency\n cumcoulomb_title = hdr_summary.cumulated_coulombic_efficiency\n coulomb_diff_title = hdr_summary.coulombic_difference\n cumcoulomb_diff_title = hdr_summary.cumulated_coulombic_difference\n col_discharge_loss_title = hdr_summary.discharge_capacity_loss\n col_charge_loss_title = hdr_summary.charge_capacity_loss\n dcloss_cumsum_title = hdr_summary.cumulated_discharge_capacity_loss\n closs_cumsum_title = hdr_summary.cumulated_charge_capacity_loss\n endv_charge_title = hdr_summary.end_voltage_charge\n endv_discharge_title = hdr_summary.end_voltage_discharge\n ocv_1_v_min_title = hdr_summary.ocv_first_min\n ocv_1_v_max_title = hdr_summary.ocv_first_max\n ocv_2_v_min_title = hdr_summary.ocv_second_min\n ocv_2_v_max_title = hdr_summary.ocv_second_max\n ir_discharge_title = hdr_summary.ir_discharge\n ir_charge_title = hdr_summary.ir_charge\n\n ric_disconnect_title = hdr_summary.cumulated_ric_disconnect\n ric_sei_title = hdr_summary.cumulated_ric_sei\n ric_title = hdr_summary.cumulated_ric\n high_level_at_cycle_n_txt = hdr_summary.high_level\n low_level_at_cycle_n_txt = hdr_summary.low_level\n shifted_charge_capacity_title = hdr_summary.shifted_charge_capacity\n shifted_discharge_capacity_title = hdr_summary.shifted_discharge_capacity\n\n h_normalized_cycle = hdr_summary.normalized_cycle_index\n\n hdr_steps = self.headers_step_table\n\n # Here are the two main DataFrames for the test\n # (raw-data and 
summary-data)\n summary_df = dataset.summary\n if not self.load_only_summary:\n # Can't find summary from raw data if raw data is not loaded.\n raw = dataset.raw\n if use_cellpy_stat_file:\n # This should work even if raw does not\n # contain all data from the test\n try:\n summary_requirment = raw[d_txt].isin(summary_df[d_txt])\n except KeyError:\n logging.info(\"Error in stat_file (?) - using _select_last\")\n summary_requirment = self._select_last(raw)\n else:\n summary_requirment = self._select_last(raw)\n summary = raw[summary_requirment].copy()\n else:\n # summary_requirment = self._reloadrows_raw(summary_df[d_txt])\n summary = summary_df\n dataset.summary = summary\n logging.warning(\"not implemented yet\")\n return\n\n column_names = summary.columns\n summary_length = len(summary[column_names[0]])\n summary.index = list(range(summary_length))\n # could also index based on Cycle_Index\n # indexes = summary.index\n\n if select_columns:\n columns_to_keep = [charge_txt, c_txt, d_txt, dt_txt, discharge_txt, tt_txt]\n for cn in column_names:\n if not columns_to_keep.count(cn):\n summary.pop(cn)\n\n if not use_cellpy_stat_file:\n logging.debug(\"not using cellpy statfile\")\n # logging.debug(\"Values obtained from raw:\")\n # logging.debug(summary.head(20))\n\n # logging.debug(\"Creates summary: specific discharge ('%s')\"\n # % discharge_title)\n summary[discharge_title] = summary[discharge_txt] * specific_converter\n\n # logging.debug(\"Creates summary: specific scharge ('%s')\" %\n # charge_title)\n summary[charge_title] = summary[charge_txt] * specific_converter\n\n # logging.debug(\"Creates summary: cumulated specific charge ('%s')\" %\n # cumdischarge_title)\n summary[cumdischarge_title] = summary[discharge_title].cumsum()\n\n # logging.debug(\"Creates summary: cumulated specific charge ('%s')\" %\n # cumcharge_title)\n summary[cumcharge_title] = summary[charge_title].cumsum()\n\n if self.cycle_mode == \"anode\":\n logging.info(\n \"Assuming cycling in anode half-cell (discharge before charge) mode\"\n )\n _first_step_txt = discharge_title\n _second_step_txt = charge_title\n else:\n logging.info(\"Assuming cycling in full-cell / cathode mode\")\n _first_step_txt = charge_title\n _second_step_txt = discharge_title\n\n # logging.debug(\"Creates summary: coulombic efficiency ('%s')\" %\n # coulomb_title)\n # logging.debug(\"100 * ('%s')/('%s)\" % (_second_step_txt,\n # _first_step_txt))\n summary[coulomb_title] = (\n 100.0 * summary[_second_step_txt] / summary[_first_step_txt]\n )\n\n # logging.debug(\"Creates summary: coulombic difference ('%s')\" %\n # coulomb_diff_title)\n # logging.debug(\"'%s') - ('%s)\" % (_second_step_txt, _first_step_txt))\n summary[coulomb_diff_title] = (\n summary[_second_step_txt] - summary[_first_step_txt]\n )\n\n # logging.debug(\"Creates summary: cumulated \"\n # f\"coulombic efficiency ('{cumcoulomb_title}')\")\n summary[cumcoulomb_title] = summary[coulomb_title].cumsum()\n # logging.debug(\"Creates summary: cumulated coulombic difference \"\n # \"f('{cumcoulomb_diff_title}')\")\n summary[cumcoulomb_diff_title] = summary[coulomb_diff_title].cumsum()\n\n # ---------------- discharge loss ---------------------\n # Assume that both charge and discharge is defined as positive.\n # The gain for cycle n (compared to cycle n-1)\n # is then cap[n] - cap[n-1]. 
The loss is the negative of gain.\n # discharge loss = discharge_cap[n-1] - discharge_cap[n]\n # logging.debug(\"Creates summary: calculates DL\")\n summary[col_discharge_loss_title] = (\n summary[discharge_title].shift(1) - summary[discharge_title]\n )\n\n summary[dcloss_cumsum_title] = summary[col_discharge_loss_title].cumsum()\n\n # ---------------- charge loss ------------------------\n # charge loss = charge_cap[n-1] - charge_cap[n]\n summary[col_charge_loss_title] = (\n summary[charge_title].shift(1) - summary[charge_title]\n )\n\n summary[closs_cumsum_title] = summary[col_charge_loss_title].cumsum()\n\n # --------------- D.L. --------------------------------\n # NH_n: high level at cycle n. The slope NHn=f(n) is linked to SEI loss\n # NB_n: low level (summation of irreversible capacities) at cycle n\n # Ref_n: sum[i=1 to ref](Q_charge_i - Q_discharge_i) + Q_charge_ref\n # Typically, ref should be a number where the electrode has become\n # stable (i.e. 5).\n # NBn/100 = sum[i=1 to n](Q_charge_i - Q_discharge_i) / Ref_n\n # NHn/100 = Q_charge_n + sum[i=1 to n-1](Q_charge_i - Q_discharge_i)\n # / Ref_n\n # NH = 100% ok if NH<120 at n=200\n # NB = 20% stable (or less)\n\n n = self.daniel_number\n cap_ref = summary.loc[summary[c_txt] == n, _first_step_txt]\n if not cap_ref.empty:\n cap_ref = cap_ref.values[0]\n\n ref = (\n summary.loc[summary[c_txt] < n, _second_step_txt].sum()\n + summary.loc[summary[c_txt] < n, _first_step_txt].sum()\n + cap_ref\n )\n\n summary[low_level_at_cycle_n_txt] = (100 / ref) * (\n summary[_first_step_txt].cumsum() - summary[_second_step_txt].cumsum()\n )\n\n summary[high_level_at_cycle_n_txt] = (100 / ref) * (\n summary[_first_step_txt]\n + summary[_first_step_txt].cumsum()\n - summary[_second_step_txt].cumsum()\n )\n else:\n txt = f\"ref cycle number: {n}\"\n logging.info(\n \"could not extract low-high levels (ref cycle number does not exist)\"\n )\n # logging.info(txt)\n summary[low_level_at_cycle_n_txt] = np.nan\n summary[high_level_at_cycle_n_txt] = np.nan\n\n # --------------relative irreversible capacities\n # as defined by Gauthier et al.---\n # RIC = discharge_cap[n-1] - charge_cap[n] / charge_cap[n-1]\n RIC = (summary[_first_step_txt].shift(1) - summary[_second_step_txt]) / summary[\n _second_step_txt\n ].shift(1)\n summary[ric_title] = RIC.cumsum()\n\n # RIC_SEI = discharge_cap[n] - charge_cap[n-1] / charge_cap[n-1]\n RIC_SEI = (\n summary[_first_step_txt] - summary[_second_step_txt].shift(1)\n ) / summary[_second_step_txt].shift(1)\n summary[ric_sei_title] = RIC_SEI.cumsum()\n\n # RIC_disconnect = charge_cap[n-1] - charge_cap[n] / charge_cap[n-1]\n RIC_disconnect = (\n summary[_second_step_txt].shift(1) - summary[_second_step_txt]\n ) / summary[_second_step_txt].shift(1)\n summary[ric_disconnect_title] = RIC_disconnect.cumsum()\n\n # -------------- shifted capacities as defined by J. Dahn et al. 
-----\n # need to double check this (including checking\n # if it is valid in cathode mode).\n individual_edge_movement = summary[_first_step_txt] - summary[_second_step_txt]\n\n summary[shifted_charge_capacity_title] = individual_edge_movement.cumsum()\n summary[shifted_discharge_capacity_title] = (\n summary[shifted_charge_capacity_title] + summary[_first_step_txt]\n )\n\n # if convert_date:\n # # TODO: should move this to the instrument reader procedure\n # logging.debug(\"converting date from xls-type\")\n # summary[date_time_txt_title] = \\\n # summary[dt_txt].apply(xldate_as_datetime) # , option=\"to_string\")\n\n if find_ocv and not self.load_only_summary:\n warnings.warn(DeprecationWarning(\"this option will be removed in v.0.4.0\"))\n # should remove this option\n logging.info(\"CONGRATULATIONS\")\n logging.info(\"-thought this would never be run!\")\n logging.info(\"-find_ocv in make_summary\")\n logging.info(\n \" this is a stupid routine that can be implemented much better!\"\n )\n do_ocv_1 = True\n do_ocv_2 = True\n\n ocv1_type = \"ocvrlx_up\"\n ocv2_type = \"ocvrlx_down\"\n\n if not self.cycle_mode == \"anode\":\n ocv2_type = \"ocvrlx_up\"\n ocv1_type = \"ocvrlx_down\"\n\n ocv_1 = self._get_ocv(\n ocv_steps=dataset.ocv_steps,\n ocv_type=ocv1_type,\n dataset_number=dataset_number,\n )\n\n ocv_2 = self._get_ocv(\n ocv_steps=dataset.ocv_steps,\n ocv_type=ocv2_type,\n dataset_number=dataset_number,\n )\n\n if do_ocv_1:\n only_zeros = summary[discharge_txt] * 0.0\n ocv_1_indexes = []\n ocv_1_v_min = []\n ocv_1_v_max = []\n ocvcol_min = only_zeros.copy()\n ocvcol_max = only_zeros.copy()\n\n for j in ocv_1:\n cycle = j[\"Cycle_Index\"].values[0] # jepe fix\n # try to find inxed\n index = summary[(summary[c_txt] == cycle)].index\n # print cycle, index,\n v_min = j[\"Voltage\"].min() # jepe fix\n v_max = j[\"Voltage\"].max() # jepe fix\n # print v_min,v_max\n dv = v_max - v_min\n ocvcol_min.iloc[index] = v_min\n ocvcol_max.iloc[index] = v_max\n\n summary.insert(0, column=ocv_1_v_min_title, value=ocvcol_min)\n summary.insert(0, column=ocv_1_v_max_title, value=ocvcol_max)\n\n if do_ocv_2:\n only_zeros = summary[discharge_txt] * 0.0\n ocv_2_indexes = []\n ocv_2_v_min = []\n ocv_2_v_max = []\n ocvcol_min = only_zeros.copy()\n ocvcol_max = only_zeros.copy()\n\n for j in ocv_2:\n cycle = j[\"Cycle_Index\"].values[0] # jepe fix\n # try to find inxed\n index = summary[(summary[c_txt] == cycle)].index\n v_min = j[\"Voltage\"].min() # jepe fix\n v_max = j[\"Voltage\"].max() # jepe fix\n dv = v_max - v_min\n ocvcol_min.iloc[index] = v_min\n ocvcol_max.iloc[index] = v_max\n summary.insert(0, column=ocv_2_v_min_title, value=ocvcol_min)\n summary.insert(0, column=ocv_2_v_max_title, value=ocvcol_max)\n\n if find_end_voltage and not self.load_only_summary:\n # needs to be fixed so that end-voltage also can be extracted\n # from the summary\n ev_t0 = time.time()\n logging.debug(\"finding end-voltage\")\n logging.debug(f\"dt: {time.time() - ev_t0}\")\n only_zeros_discharge = summary[discharge_txt] * 0.0\n only_zeros_charge = summary[charge_txt] * 0.0\n if not dataset.discharge_steps:\n logging.debug(\"need to collect discharge steps\")\n discharge_steps = self.get_step_numbers(\n steptype=\"discharge\", allctypes=False, dataset_number=dataset_number\n )\n logging.debug(f\"dt: {time.time() - ev_t0}\")\n else:\n discharge_steps = dataset.discharge_steps\n logging.debug(\" already have discharge_steps\")\n if not dataset.charge_steps:\n logging.debug(\"need to collect charge steps\")\n charge_steps = 
self.get_step_numbers(\n steptype=\"charge\", allctypes=False, dataset_number=dataset_number\n )\n logging.debug(f\"dt: {time.time() - ev_t0}\")\n else:\n charge_steps = dataset.charge_steps\n logging.debug(\" already have charge_steps\")\n\n endv_indexes = []\n endv_values_dc = []\n endv_values_c = []\n # logging.debug(\"trying to find end voltage for\")\n # logging.debug(dataset.loaded_from)\n # logging.debug(\"Using the following chargesteps\")\n # logging.debug(charge_steps)\n # logging.debug(\"Using the following dischargesteps\")\n # logging.debug(discharge_steps)\n logging.debug(\"starting iterating through the index\")\n for i in summary.index:\n # txt = \"index in summary.index: %i\" % i\n # logging.debug(txt)\n # selecting the appropriate cycle\n cycle = summary.iloc[i][c_txt]\n # txt = \"cycle: %i\" % cycle\n # logging.debug(txt)\n step = discharge_steps[cycle]\n\n # finding end voltage for discharge\n if step[-1]: # selecting last\n # TODO: @jepe - use pd.loc[row,column]\n # for col or pd.loc[(pd.[\"step\"]==1),\"x\"]\n end_voltage_dc = raw[\n (raw[c_txt] == cycle) & (dataset.raw[s_txt] == step[-1])\n ][voltage_header]\n # This will not work if there are more than one item in step\n end_voltage_dc = end_voltage_dc.values[-1] # selecting\n # last (could also select amax)\n else:\n end_voltage_dc = 0 # could also use numpy.nan\n\n # finding end voltage for charge\n step2 = charge_steps[cycle]\n if step2[-1]:\n end_voltage_c = raw[\n (raw[c_txt] == cycle) & (dataset.raw[s_txt] == step2[-1])\n ][voltage_header]\n end_voltage_c = end_voltage_c.values[-1]\n # end_voltage_c = np.amax(end_voltage_c)\n else:\n end_voltage_c = 0\n endv_indexes.append(i)\n endv_values_dc.append(end_voltage_dc)\n endv_values_c.append(end_voltage_c)\n logging.debug(\"finished iterating\")\n logging.debug(f\"find end V took: {time.time() - ev_t0} s\")\n ir_frame_dc = only_zeros_discharge + endv_values_dc\n ir_frame_c = only_zeros_charge + endv_values_c\n summary.insert(0, column=endv_discharge_title, value=ir_frame_dc)\n summary.insert(0, column=endv_charge_title, value=ir_frame_c)\n\n if find_ir and (not self.load_only_summary) and (ir_txt in dataset.raw.columns):\n # should check: test.charge_steps = None,\n # test.discharge_steps = None\n # THIS DOES NOT WORK PROPERLY!!!!\n # Found a file where it writes IR for cycle n on cycle n+1\n # This only picks out the data on the last IR step before\n logging.debug(\"finding ir\")\n only_zeros = summary[discharge_txt] * 0.0\n if not dataset.discharge_steps:\n discharge_steps = self.get_step_numbers(\n steptype=\"discharge\", allctypes=False, dataset_number=dataset_number\n )\n else:\n discharge_steps = dataset.discharge_steps\n logging.debug(\" already have discharge_steps\")\n if not dataset.charge_steps:\n charge_steps = self.get_step_numbers(\n steptype=\"charge\", allctypes=False, dataset_number=dataset_number\n )\n else:\n charge_steps = dataset.charge_steps\n logging.debug(\" already have charge_steps\")\n\n ir_indexes = []\n ir_values = []\n ir_values2 = []\n # logging.debug(\"trying to find ir for\")\n # logging.debug(dataset.loaded_from)\n # logging.debug(\"Using the following charge_steps\")\n # logging.debug(charge_steps)\n # logging.debug(\"Using the following discharge_steps\")\n # logging.debug(discharge_steps)\n\n for i in summary.index:\n # txt = \"index in summary.index: %i\" % i\n # logging.debug(txt)\n # selecting the appropriate cycle\n cycle = summary.iloc[i][c_txt] # \"Cycle_Index\" = i + 1\n # txt = \"cycle: %i\" % cycle\n # 
logging.debug(txt)\n step = discharge_steps[cycle]\n if step[0]:\n ir = raw.loc[\n (raw[c_txt] == cycle) & (dataset.raw[s_txt] == step[0]), ir_txt\n ]\n # This will not work if there are more than one item in step\n ir = ir.values[0]\n else:\n ir = 0\n step2 = charge_steps[cycle]\n if step2[0]:\n\n ir2 = raw[(raw[c_txt] == cycle) & (dataset.raw[s_txt] == step2[0])][\n ir_txt\n ].values[0]\n else:\n ir2 = 0\n ir_indexes.append(i)\n ir_values.append(ir)\n ir_values2.append(ir2)\n\n ir_frame = only_zeros + ir_values\n ir_frame2 = only_zeros + ir_values2\n summary.insert(0, column=ir_discharge_title, value=ir_frame)\n summary.insert(0, column=ir_charge_title, value=ir_frame2)\n\n if add_normalized_cycle_index:\n if normalization_cycles is not None:\n logging.info(\n f\"Using these cycles for finding the nominal capacity: {normalization_cycles}\"\n )\n if not isinstance(normalization_cycles, (list, tuple)):\n normalization_cycles = [normalization_cycles]\n\n cap_ref = summary.loc[\n summary[c_txt].isin(normalization_cycles), _first_step_txt\n ]\n if not cap_ref.empty:\n nom_cap = cap_ref.mean()\n else:\n logging.info(f\"Empty reference cycle(s)\")\n\n if nom_cap is None:\n logging.debug(f\"No nom_cap given\")\n nom_cap = self.cell.nom_cap\n logging.info(f\"Using the following nominal capacity: {nom_cap}\")\n summary[h_normalized_cycle] = summary[cumcharge_title] / nom_cap\n\n if add_c_rate:\n logging.debug(\"Extracting C-rates\")\n steps = self.cell.steps\n\n # if hdr_summary.cycle_index not in summary.columns:\n # summary = summary.reset_index()\n\n charge_steps = steps.loc[\n steps.type == \"charge\", [hdr_steps.cycle, \"rate_avr\"]\n ].rename(columns={\"rate_avr\": hdr_summary.charge_c_rate})\n\n summary = summary.merge(\n charge_steps.drop_duplicates(subset=[hdr_steps.cycle], keep=\"first\"),\n left_on=hdr_summary.cycle_index,\n right_on=hdr_steps.cycle,\n how=\"left\",\n ).drop(columns=hdr_steps.cycle)\n\n discharge_steps = steps.loc[\n steps.type == \"discharge\", [hdr_steps.cycle, \"rate_avr\"]\n ].rename(columns={\"rate_avr\": hdr_summary.discharge_c_rate})\n\n summary = summary.merge(\n discharge_steps.drop_duplicates(subset=[hdr_steps.cycle], keep=\"first\"),\n left_on=hdr_summary.cycle_index,\n right_on=hdr_steps.cycle,\n how=\"left\",\n ).drop(columns=hdr_steps.cycle)\n\n if sort_my_columns:\n logging.debug(\"sorting columns\")\n new_first_col_list = [dt_txt, tt_txt, d_txt, c_txt]\n summary = self.set_col_first(summary, new_first_col_list)\n\n if cycle_index_as_index:\n index_col = hdr_summary.cycle_index\n try:\n summary.set_index(index_col, inplace=True)\n except KeyError:\n logging.debug(\"Setting cycle_index as index failed\")\n\n dataset.summary = summary\n logging.debug(f\"(dt: {(time.time() - time_00):4.2f}s)\")\n\n def inspect_nominal_capacity(self, cycles=None):\n \"\"\"Method for estimating the nominal capacity\n\n Args:\n cycles (list of ints): the cycles where it is assumed that the cell reaches nominal capacity.\n\n Returns:\n Nominal capacity (float).\n \"\"\"\n logging.debug(\"inspecting: nominal capacity\")\n print(\"Sorry! 
This method is still under development.\")\n print(\"Maybe you can plot your data and find the nominal capacity yourself?\")\n if cycles is None:\n cycles = [1, 2, 3]\n\n summary = self.cell.summary\n\n try:\n nc = summary.loc[\n summary[self.headers_normal.cycle_index_txt].isin(cycles),\n self.headers_summary.discharge_capacity,\n ].mean()\n print(\"All I can say for now is that the average discharge capacity\")\n print(f\"for the cycles {cycles} is {nc:0.2f}\")\n nc = float(nc)\n\n except ZeroDivisionError:\n print(\"zero division error\")\n nc = None\n\n return nc\n\n\ndef get(\n filename=None,\n mass=None,\n instrument=None,\n nominal_capacity=None,\n logging_mode=None,\n cycle_mode=None,\n auto_summary=True,\n **kwargs,\n):\n \"\"\"Create a CellpyData object\n\n Args:\n filename (str, os.PathLike, or list of raw-file names): path to file(s)\n mass (float): mass of active material (mg) (defaults to mass given in cellpy-file or 1.0)\n instrument (str): instrument to use (defaults to the one in your cellpy config file) (arbin_res, arbin_sql, arbin_sql_csv, arbin_sql_xlxs)\n nominal_capacity (float): nominal capacity for the cell (e.g. used for finding C-rates)\n logging_mode (str): \"INFO\" or \"DEBUG\"\n cycle_mode (str): the cycle mode (e.g. \"anode\" or \"full_cell\")\n auto_summary (bool): (re-) create summary.\n **kwargs: sent to the loader\n\n Returns:\n CellpyData object (if successful, None if not)\n\n \"\"\"\n\n from cellpy import log\n\n log.setup_logging(default_level=logging_mode)\n logging.debug(\"-------running-get--------\")\n cellpy_instance = CellpyData()\n\n db_readers = [\"arbin_sql\"]\n\n if instrument is not None:\n cellpy_instance.set_instrument(instrument=instrument)\n\n if cellpy_instance.tester in db_readers:\n file_needed = False\n else:\n file_needed = True\n\n if cycle_mode is not None:\n cellpy_instance.cycle_mode = cycle_mode\n\n if filename is not None:\n if file_needed:\n if not isinstance(filename, (list, tuple)):\n filename = Path(filename)\n\n if not filename.is_file():\n print(f\"Could not find {filename}\")\n print(\"Returning None\")\n return\n\n if filename.suffix in [\".h5\", \".hdf5\", \".cellpy\", \".cpy\"]:\n logging.info(f\"Loading cellpy-file: {filename}\")\n cellpy_instance.load(filename, **kwargs)\n\n # in case the user wants to give another mass to the cell:\n if mass is not None:\n logging.info(f\"Setting mass: {mass}\")\n cellpy_instance.set_mass(mass)\n if auto_summary:\n logging.info(\"Creating step table\")\n cellpy_instance.make_step_table()\n logging.info(\"Creating summary data\")\n cellpy_instance.make_summary()\n logging.info(\"Created CellpyData object\")\n return cellpy_instance\n\n # raw file\n logging.info(f\"Loading raw-file: {filename}\")\n cellpy_instance.from_raw(filename, **kwargs)\n if not cellpy_instance:\n print(\"Could not load file: check log!\")\n print(\"Returning None\")\n return\n\n if mass is not None:\n logging.info(f\"Setting mass: {mass}\")\n cellpy_instance.set_mass(mass)\n\n if nominal_capacity is not None:\n logging.info(f\"Setting nominal capacity: {nominal_capacity}\")\n cellpy_instance.set_nom_cap(nominal_capacity)\n\n if auto_summary:\n logging.info(\"Creating step table\")\n cellpy_instance.make_step_table()\n logging.info(\"Creating summary data\")\n cellpy_instance.make_summary()\n else:\n if mass:\n prms.Materials[\"default_mass\"] = mass\n prms.Materials[\"default_mass\"] = mass\n if nominal_capacity:\n prms.DataSet[\"nom_cap\"] = nominal_capacity\n\n logging.info(\"Created CellpyData object\")\n 
return cellpy_instance\n\n\nif __name__ == \"__main__\":\n print(\"running\", end=\" \")\n print(sys.argv[0])\n import logging\n from cellpy import log\n\n log.setup_logging(default_level=\"DEBUG\")\n\n from cellpy.utils import example_data\n\n f = example_data.cellpy_file_path()\n print(f)\n print(f.is_file())\n c = CellpyData()\n c.dev_load(f, accept_old=True)\n c.make_step_table()\n c.make_summary()\n print(\"Here we have it\")\n print(c.cell.summary.columns)\n print(c.cell.steps.columns)\n print(c.cell.raw.columns)\n"
] |
[
[
"numpy.amax",
"pandas.read_csv",
"pandas.concat",
"pandas.Series",
"numpy.unique",
"numpy.amin",
"numpy.median",
"pandas.DataFrame",
"numpy.ceil",
"scipy.interpolate.interp1d",
"pandas.HDFStore",
"pandas.set_option"
]
] |
wuch15/FedKD
|
[
"6166696e4abd74a16c2d7d80aa7983b99c17e153"
] |
[
"model.py"
] |
[
"import numpy as np\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nimport os\nimport math\n\n\nclass AdditiveAttention(nn.Module):\n ''' AttentionPooling used to weighted aggregate news vectors\n Arg: \n d_h: the last dimension of input\n '''\n def __init__(self, d_h, hidden_size=200):\n super(AdditiveAttention, self).__init__()\n self.att_fc1 = nn.Linear(d_h, hidden_size)\n self.att_fc2 = nn.Linear(hidden_size, 1)\n\n def forward(self, x, attn_mask=None):\n \"\"\"\n Args:\n x: batch_size, candidate_size, candidate_vector_dim\n attn_mask: batch_size, candidate_size\n Returns:\n (shape) batch_size, candidate_vector_dim\n \"\"\"\n bz = x.shape[0]\n e = self.att_fc1(x)\n e = nn.Tanh()(e)\n alpha = self.att_fc2(e)\n\n alpha = torch.exp(alpha)\n if attn_mask is not None:\n alpha = alpha * attn_mask.unsqueeze(2)\n alpha = alpha / (torch.sum(alpha, dim=1, keepdim=True) + 1e-8)\n\n x = torch.bmm(x.permute(0, 2, 1), alpha)\n x = torch.reshape(x, (bz, -1)) # (bz, 400)\n return x\n\n\nclass ScaledDotProductAttention(nn.Module):\n def __init__(self, d_k):\n super(ScaledDotProductAttention, self).__init__()\n self.d_k = d_k\n\n def forward(self, Q, K, V, attn_mask=None):\n # [bz, 20, seq_len, 20] x [bz, 20, 20, seq_len] -> [bz, 20, seq_len, seq_len]\n scores = torch.matmul(Q, K.transpose(-1, -2)) / np.sqrt(self.d_k)\n scores = torch.exp(scores)\n\n if attn_mask is not None:\n scores = scores * attn_mask\n attn = scores / (torch.sum(scores, dim=-1, keepdim=True) + 1e-8)\n\n # [bz, 20, seq_len, seq_len] x [bz, 20, seq_len, 20] -> [bz, 20, seq_len, 20]\n context = torch.matmul(attn, V)\n return context, attn\n\n\nclass MultiHeadAttention(nn.Module):\n def __init__(self, d_model, n_heads, d_k, d_v, enable_gpu):\n super(MultiHeadAttention, self).__init__()\n self.d_model = d_model # 300\n self.n_heads = n_heads # 20\n self.d_k = d_k # 20\n self.d_v = d_v # 20\n self.enable_gpu = enable_gpu\n\n self.W_Q = nn.Linear(d_model, d_k * n_heads) # 300, 400\n self.W_K = nn.Linear(d_model, d_k * n_heads) # 300, 400\n self.W_V = nn.Linear(d_model, d_v * n_heads) # 300, 400\n\n self._initialize_weights()\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Linear):\n nn.init.xavier_uniform_(m.weight, gain=1)\n\n def forward(self, Q, K, V, mask=None):\n # Q, K, V: [bz, seq_len, 300] -> W -> [bz, seq_len, 400]-> q_s: [bz, 20, seq_len, 20]\n batch_size, seq_len, _ = Q.shape\n\n q_s = self.W_Q(Q).view(batch_size, -1, self.n_heads,\n self.d_k).transpose(1, 2)\n k_s = self.W_K(K).view(batch_size, -1, self.n_heads,\n self.d_k).transpose(1, 2)\n v_s = self.W_V(V).view(batch_size, -1, self.n_heads,\n self.d_v).transpose(1, 2)\n\n if mask is not None:\n mask = mask.unsqueeze(1).expand(batch_size, seq_len, seq_len) # [bz, seq_len, seq_len]\n mask = mask.unsqueeze(1).repeat(1, self.n_heads, 1, 1) # attn_mask : [bz, 20, seq_len, seq_len]\n\n context, attn = ScaledDotProductAttention(self.d_k)(\n q_s, k_s, v_s, mask) # [bz, 20, seq_len, 20]\n context = context.transpose(1, 2).contiguous().view(\n batch_size, -1, self.n_heads * self.d_v) # [bz, seq_len, 400]\n # output = self.fc(context)\n return context #self.layer_norm(output + residual)\n\nclass WeightedLinear(torch.nn.Module):\n def __init__(self, in_features: int, out_features: int) -> None:\n super(WeightedLinear, self).__init__()\n self.in_features = in_features\n self.out_features = out_features\n self.weight = nn.Parameter(torch.Tensor(out_features, in_features))\n self.reset_parameters()\n\n def reset_parameters(self) -> 
None:\n nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))\n\n def forward(self, input: torch.Tensor) -> torch.Tensor:\n weight_softmax = nn.Softmax(dim=-1)(self.weight)\n return F.linear(input, weight_softmax)\n\n def extra_repr(self) -> str:\n return 'in_features={}, out_features={}'.format(\n self.in_features, self.out_features\n )\n\nclass TextEncoder(torch.nn.Module):\n def __init__(self,\n word_embedding,\n word_embedding_dim,\n num_attention_heads,\n query_vector_dim,\n dropout_rate,\n enable_gpu=True):\n super(TextEncoder, self).__init__()\n self.word_embedding = word_embedding\n self.dropout_rate = dropout_rate\n self.multihead_attention = MultiHeadAttention(word_embedding_dim,\n num_attention_heads, 20,\n 20, enable_gpu)\n self.additive_attention = AdditiveAttention(num_attention_heads * 20,\n query_vector_dim)\n\n def forward(self, text, mask=None):\n \"\"\"\n Args:\n text: Tensor(batch_size) * num_words_text\n Returns:\n (shape) batch_size, word_embedding_dim\n \"\"\"\n # batch_size, num_words_text, word_embedding_dim\n text_vector = F.dropout(self.word_embedding(text.long()),\n p=self.dropout_rate,\n training=self.training)\n # batch_size, num_words_text, word_embedding_dim\n multihead_text_vector = self.multihead_attention(\n text_vector, text_vector, text_vector, mask)\n multihead_text_vector = F.dropout(multihead_text_vector,\n p=self.dropout_rate,\n training=self.training)\n # batch_size, word_embedding_dim\n text_vector = self.additive_attention(multihead_text_vector, mask)\n return text_vector\n\n\nclass ElementEncoder(torch.nn.Module):\n def __init__(self, num_elements, embedding_dim, enable_gpu=True):\n super(ElementEncoder, self).__init__()\n self.enable_gpu = enable_gpu\n self.embedding = nn.Embedding(num_elements,\n embedding_dim,\n padding_idx=0)\n\n def forward(self, element):\n # batch_size, embedding_dim\n element_vector = self.embedding(\n (element.cuda() if self.enable_gpu else element).long())\n return element_vector\n\n\nclass NewsEncoder(torch.nn.Module):\n def __init__(self, args, word_embedding, category_dict_size,\n domain_dict_size, subcategory_dict_size):\n super(NewsEncoder, self).__init__()\n self.args = args\n self.attributes2length = {\n 'title': args.num_words_title,\n 'abstract': args.num_words_abstract,\n 'body': args.num_words_body,\n 'category': 1,\n 'domain': 1,\n 'subcategory': 1\n }\n for key in list(self.attributes2length.keys()):\n if key not in args.news_attributes:\n self.attributes2length[key] = 0\n\n self.attributes2start = {\n key: sum(\n list(self.attributes2length.values())\n [:list(self.attributes2length.keys()).index(key)])\n for key in self.attributes2length.keys()\n }\n\n self.word_embedding = word_embedding\n assert len(args.news_attributes) > 0\n text_encoders_candidates = ['title', 'abstract']\n self.text_encoders = nn.ModuleDict({\n name:\n TextEncoder(self.word_embedding, args.word_embedding_dim,\n args.num_attention_heads, args.news_query_vector_dim,\n args.drop_rate, args.enable_gpu)\n for name in (set(args.news_attributes)\n & set(text_encoders_candidates))\n })\n\n if args.use_pretrain_news_encoder:\n for name in self.text_encoders:\n self.text_encoders[name].load_state_dict(\n torch.load(os.path.join(args.pretrain_news_encoder_path, \n 'behavior_encoder.pkl'))\n )\n\n if 'body' in args.news_attributes:\n assert args.num_words_body % 4 == 0\n self.num_words_body_segment = args.num_words_body // 4\n self.body_encoder = TextEncoder(self.word_embedding, args.word_embedding_dim,\n args.num_attention_heads,\n 
args.news_query_vector_dim, args.drop_rate,\n args.enable_gpu)\n\n if args.use_pretrain_news_encoder:\n self.body_encoder.load_state_dict(\n torch.load(os.path.join(args.pretrain_news_encoder_path, \n 'behavior_encoder.pkl'))\n )\n\n \n name2num = {\n \"category\": category_dict_size + 1,\n \"domain\": domain_dict_size + 1,\n \"subcategory\": subcategory_dict_size + 1\n }\n element_encoders_candidates = ['category', 'domain', 'subcategory']\n self.element_encoders = nn.ModuleDict({\n name: ElementEncoder(name2num[name], \n args.num_attention_heads * 20,\n args.enable_gpu)\n for name in (set(args.news_attributes)\n & set(element_encoders_candidates))\n })\n if len(args.news_attributes) > 1:\n self.final_attention = AdditiveAttention(\n args.num_attention_heads * 20, args.news_query_vector_dim)\n\n self.reduce_dim_linear = nn.Linear(args.num_attention_heads * 20,\n args.news_dim)\n\n if args.use_pretrain_news_encoder:\n self.reduce_dim_linear.load_state_dict(\n torch.load(os.path.join(args.pretrain_news_encoder_path, \n 'reduce_dim_linear.pkl'))\n )\n\n def forward(self, news):\n \"\"\"\n Args:\n Returns:\n (shape) batch_size, news_dim\n \"\"\"\n text_vectors = [\n encoder(\n torch.narrow(news, 1, self.attributes2start[name],\n self.attributes2length[name]))\n for name, encoder in self.text_encoders.items()\n ]\n if 'body' in self.args.news_attributes:\n body = torch.narrow(news, 1, self.attributes2start['body'],\n self.args.num_words_body)\n body = body.reshape(-1, self.num_words_body_segment)\n body_vector = self.body_encoder(body)\n body_vector = body_vector.view(-1, 4, body_vector.size(-1))\n body_vector = torch.mean(body_vector, dim=1)\n\n text_vectors.append(body_vector)\n\n element_vectors = [\n encoder(\n torch.narrow(news, 1, self.attributes2start[name],\n self.attributes2length[name]).squeeze(dim=1))\n for name, encoder in self.element_encoders.items()\n ]\n\n all_vectors = text_vectors + element_vectors\n\n if len(all_vectors) == 1:\n final_news_vector = all_vectors[0]\n else:\n final_news_vector = self.final_attention(\n torch.stack(all_vectors, dim=1))\n\n # final_news_vector = torch.mean(\n # torch.stack(all_vectors, dim=1),\n # dim=1\n # )\n # batch_size, news_dim\n #final_news_vector = self.reduce_dim_linear(final_news_vector)\n return final_news_vector\n\n\nclass UETBingEncoder(torch.nn.Module):\n def __init__(self, args, word_embedding):\n super(UETBingEncoder, self).__init__()\n self.args = args\n self.behavior_encoder = TextEncoder(\n word_embedding,\n args.word_embedding_dim,\n args.num_attention_heads,\n args.news_query_vector_dim,\n args.drop_rate,\n args.enable_gpu)\n self.reduce_dim_linear = nn.Linear(args.num_attention_heads * 20,\n args.news_dim)\n if args.use_pretrain_news_encoder:\n self.behavior_encoder.load_state_dict(\n torch.load(os.path.join(args.pretrain_news_encoder_path, \n 'behavior_encoder.pkl'))\n )\n self.reduce_dim_linear.load_state_dict(\n torch.load(os.path.join(args.pretrain_news_encoder_path, \n 'reduce_dim_linear.pkl'))\n )\n\n def forward(self, behavior_ids):\n behavior_vector = self.behavior_encoder(behavior_ids)\n behavior_vector = self.reduce_dim_linear(behavior_vector)\n return behavior_vector\n\n\nclass UserEncoder(torch.nn.Module):\n def __init__(self, args, word_embedding, uet_encoder=None, uet_reduce_linear=None,\n bing_encoder=None, bing_reduce_linear=None):\n super(UserEncoder, self).__init__()\n self.args = args\n\n self.news_multihead_attention = MultiHeadAttention(args.num_attention_heads * 20,\n args.num_attention_heads, 20,\n 20, 
args.enable_gpu)\n\n\n self.news_additive_attention = AdditiveAttention(\n args.num_attention_heads * 20, args.user_query_vector_dim)\n if args.use_padded_news_embedding:\n # self.news_padded_news_embedding = nn.Embedding(1, args.num_attention_heads * 20)\n self.pad_doc = nn.Parameter(torch.empty(1, args.num_attention_heads * 20).uniform_(-1, 1)).type(torch.FloatTensor)\n else:\n # self.news_padded_news_embedding = None\n self.pad_doc = None\n\n if args.process_uet:\n if args.title_share_encoder:\n self.uet_encoder = nn.Sequential(\n uet_encoder, uet_reduce_linear)\n else:\n self.uet_encoder = UETBingEncoder(args, word_embedding)\n self.uet_additive_attention = AdditiveAttention(\n args.news_dim, args.user_query_vector_dim)\n\n if args.process_bing:\n if args.title_share_encoder:\n self.bing_encoder = nn.Sequential(\n bing_encoder, bing_reduce_linear)\n else:\n self.bing_encoder = UETBingEncoder(args, word_embedding)\n self.bing_additive_attention = AdditiveAttention(\n args.news_dim, args.user_query_vector_dim)\n \n if args.process_uet or args.process_bing:\n if args.uet_agg_method == 'attention':\n self.user_behavior_att = AdditiveAttention(\n args.news_dim, args.user_query_vector_dim)\n elif args.uet_agg_method == 'weighted-sum':\n self.behavior_linear = WeightedLinear(\n 3 if args.process_bing and args.process_uet else 2, 1)\n \n def get_user_news_scoring(self, log_vec, log_mask):\n log_vec = self._process_news(log_vec, log_mask, self.pad_doc,\n self.news_multihead_attention,\n self.news_additive_attention, self.args.user_log_mask,\n self.args.use_padded_news_embedding)\n\n return log_vec\n\n def get_user_uet_scoring(self, uet_ids, uet_mask):\n batch_size, user_uet_length, num_words_uet = uet_ids.shape\n uet_ids = uet_ids.view(-1, num_words_uet)\n uet_vec = self.uet_encoder(uet_ids).reshape(batch_size, user_uet_length, -1)\n uet_vec = self._process_uet_bing(uet_vec, uet_mask, self.uet_additive_attention)\n\n return uet_vec\n \n def get_user_bing_scoring(self, bing_ids, bing_mask):\n batch_size, user_bing_length, num_words_bing = bing_ids.shape\n bing_ids = bing_ids.view(-1, num_words_bing)\n bing_vec = self.bing_encoder(bing_ids).reshape(batch_size, user_bing_length, -1)\n bing_vec = self._process_uet_bing(bing_vec, bing_mask, self.bing_additive_attention)\n\n return bing_vec\n \n def _process_news(self, vec, mask, pad_doc,\n multihead_attention, additive_attention, use_mask=False, \n use_padded_embedding=False):\n assert not (use_padded_embedding and use_mask), 'Conflicting config'\n if use_padded_embedding:\n # batch_size, maxlen, dim\n batch_size = vec.shape[0]\n padding_doc = pad_doc.expand(batch_size, self.args.num_attention_heads * 20).unsqueeze(1).expand( \\\n batch_size, self.args.user_log_length , self.args.num_attention_heads * 20)\n # batch_size, maxlen, dim\n vec = vec * mask.unsqueeze(2).expand(-1, -1, self.args.num_attention_heads * 20) + padding_doc * (1 - mask.unsqueeze(2).expand(-1, -1, self.args.num_attention_heads * 20))\n # batch_size, news_dim\n vec = multihead_attention(vec, vec, vec, mask if use_mask else None)\n vec = F.dropout(vec, p=self.args.drop_rate, training=self.training)\n vec = additive_attention(vec,\n mask if use_mask else None)\n return vec\n \n def _process_uet_bing(self, vec, mask, additive_attention):\n batch_size = vec.size(0)\n vec = additive_attention(vec, mask)\n if self.training:\n mask_v = torch.empty(batch_size).bernoulli_(self.args.mask_uet_bing_rate)\n if self.args.enable_gpu:\n mask_v = mask_v.cuda()\n vec = vec * 
mask_v.unsqueeze(1).expand_as(vec)\n return vec\n\n def get_user_news_scoring(self, log_vec, log_mask):\n log_vec = self._process_news(log_vec, log_mask, self.pad_doc,\n self.news_multihead_attention,\n self.news_additive_attention, self.args.user_log_mask,\n self.args.use_padded_news_embedding)\n\n return log_vec\n\n def get_user_uet_scoring(self, uet_ids, uet_mask):\n batch_size, user_uet_length, num_words_uet = uet_ids.shape\n uet_ids = uet_ids.view(-1, num_words_uet)\n uet_vec = self.uet_encoder(uet_ids).reshape(batch_size, user_uet_length, -1)\n uet_vec = self._process_uet_bing(uet_vec, uet_mask, self.uet_additive_attention)\n\n return uet_vec\n \n def get_user_bing_scoring(self, bing_ids, bing_mask):\n batch_size, user_bing_length, num_words_bing = bing_ids.shape\n bing_ids = bing_ids.view(-1, num_words_bing)\n bing_vec = self.bing_encoder(bing_ids).reshape(batch_size, user_bing_length, -1)\n bing_vec = self._process_uet_bing(bing_vec, bing_mask, self.bing_additive_attention)\n\n return bing_vec\n\n \n def forward(self, log_vec, log_mask, uet_ids=None, uet_mask=None, bing_ids=None, bing_mask=None):\n \"\"\"\n Returns:\n (shape) batch_size, news_dim\n \"\"\"\n # batch_size, news_dim\n log_vec = self._process_news(log_vec, log_mask, self.pad_doc,\n self.news_multihead_attention,\n self.news_additive_attention, self.args.user_log_mask,\n self.args.use_padded_news_embedding)\n \n user_log_vecs = [log_vec]\n\n if self.args.process_uet:\n batch_size, user_uet_length, num_words_uet = uet_ids.shape\n uet_ids = uet_ids.view(-1, num_words_uet)\n uet_vec = self.uet_encoder(uet_ids).reshape(batch_size, user_uet_length, -1)\n uet_vec = self._process_uet_bing(uet_vec, uet_mask, self.uet_additive_attention)\n user_log_vecs.append(uet_vec)\n \n if self.args.process_bing:\n batch_size, user_bing_length, num_words_bing = bing_ids.shape\n bing_ids = bing_ids.view(-1, num_words_bing)\n bing_vec = self.bing_encoder(bing_ids).reshape(batch_size, user_bing_length, -1)\n bing_vec = self._process_uet_bing(bing_vec, bing_mask, self.bing_additive_attention)\n user_log_vecs.append(bing_vec)\n\n if len(user_log_vecs) == 1:\n return user_log_vecs[0]\n else:\n if self.args.uet_agg_method == 'attention':\n return self.user_behavior_att(torch.stack(user_log_vecs, dim=1)), user_log_vecs[0]\n if self.args.uet_agg_method == 'sum':\n return torch.sum(torch.stack(user_log_vecs, dim=1), dim=1), user_log_vecs[0]\n if self.args.uet_agg_method == 'weighted-sum':\n return (\n self.behavior_linear(torch.stack(user_log_vecs, dim=1).\\\n transpose(-1, -2)).squeeze(dim=-1), \\\n user_log_vecs[0]\n )\n \n\n\nclass Model(torch.nn.Module):\n \"\"\"\n UniUM network.\n Input 1 + K candidate news and a list of user clicked news, produce the click probability.\n \"\"\"\n def __init__(self,\n args,\n embedding_matrix,\n category_dict_size=0,\n domain_dict_size=0,\n subcategory_dict_size=0):\n super(Model, self).__init__()\n self.args = args\n\n pretrained_news_word_embedding = torch.from_numpy(embedding_matrix).float()\n\n if args.padded_news_different_word_index:\n padded_news_word_embedding = np.random.normal(\n size=(1, args.word_embedding_dim))\n padded_news_word_embedding = torch.from_numpy(\n padded_news_word_embedding).float()\n pretrained_news_word_embedding = torch.cat(\n [pretrained_news_word_embedding, padded_news_word_embedding],\n dim=0)\n\n word_embedding = nn.Embedding.from_pretrained(\n pretrained_news_word_embedding,\n freeze=args.freeze_embedding,\n padding_idx=0)\n\n if args.use_pretrain_news_encoder:\n 
word_embedding.load_state_dict(\n torch.load(os.path.join(args.pretrain_news_encoder_path, 'word_embedding.pkl'))\n )\n\n self.news_encoder = NewsEncoder(args, word_embedding,\n category_dict_size, \n domain_dict_size,\n subcategory_dict_size)\n \n if args.debias:\n self.news_encoder_debias = NewsEncoder(args, word_embedding,\n category_dict_size,\n domain_dict_size,\n subcategory_dict_size)\n self.debias_linear = nn.Sequential(\n nn.Linear(args.num_attention_heads * 20, args.num_attention_heads * 20//2),\n nn.Tanh(),\n nn.Linear(args.num_attention_heads * 20//2, 1))\n \n if args.title_share_encoder:\n self.user_encoder = UserEncoder(args, word_embedding, \n uet_encoder=self.news_encoder.text_encoders['title'],\n bing_encoder=self.news_encoder.text_encoders['title'],\n uet_reduce_linear=self.news_encoder.reduce_dim_linear,\n bing_reduce_linear=self.news_encoder.reduce_dim_linear)\n else:\n self.user_encoder = UserEncoder(args, word_embedding)\n\n self.criterion = nn.CrossEntropyLoss()\n\n def forward(self,\n input_ids,\n log_ids,\n log_mask,\n targets=None,\n uet_ids=None,\n uet_mask=None,\n bing_ids=None,\n bing_mask=None,\n compute_loss=True):\n \"\"\"\n Returns:\n click_probability: batch_size, 1 + K\n \"\"\"\n # input_ids: batch, history, num_words\n ids_length = input_ids.size(2)\n input_ids = input_ids.view(-1, ids_length)\n news_vec = self.news_encoder(input_ids)\n news_vec = news_vec.view(-1, 1 + self.args.npratio, self.args.num_attention_heads * 20)\n\n if self.args.debias:\n # 64, 64\n news_vec_debias = self.news_encoder_debias(input_ids)\n # 64, 1\n news_bias = self.debias_linear(news_vec_debias)\n # 32, 2\n news_bias = news_bias.view(-1, 1 + self.args.npratio)\n\n # batch_size, news_dim\n log_ids = log_ids.view(-1, ids_length)\n log_vec = self.news_encoder(log_ids)\n log_vec = log_vec.view(-1, self.args.user_log_length,\n self.args.num_attention_heads * 20)\n\n if self.args.process_uet or self.args.process_bing:\n user_vector, _ = self.user_encoder(log_vec, log_mask, uet_ids, uet_mask, bing_ids, bing_mask)\n else:\n user_vector = self.user_encoder(log_vec, log_mask, uet_ids, uet_mask, bing_ids, bing_mask)\n\n # batch_size, 2\n score = torch.bmm(news_vec, user_vector.unsqueeze(-1)).squeeze(\n dim=-1) + (news_bias if self.args.debias else 0)\n if compute_loss:\n loss = self.criterion(score, targets)\n return loss, score\n else:\n return score\n\n\nif __name__ == \"__main__\":\n from parameters import parse_args\n args = parse_args()\n args.news_attributes = ['title', 'abstract', 'category', 'domain', 'subcategory']\n args.debias=True\n args.process_uet = True\n args.process_bing = False\n args.user_log_mask=True\n args.padded_news_different_word_index = True\n #args.news_attributes = ['title', 'body']\n\n args.use_pretrain_news_encoder = True\n args.title_share_encoder = False\n args.debias = True\n args.uet_agg_method = 'weighted-sum'\n\n args.pretrain_news_encoder_path = \"./model_all/pretrain_textencoder/\"\n\n word_dict = torch.load(os.path.join(args.pretrain_news_encoder_path, 'word_dict.pkl'))\n\n embedding_matrix = np.random.uniform(size=(len(word_dict)+1, args.word_embedding_dim))\n model = Model(args, embedding_matrix, 10, 10, 10)\n model.cuda()\n length = args.num_words_title + args.num_words_abstract + args.num_words_body + 3\n input_ids = torch.ones((128, 2, length)).cuda()\n log_ids = torch.ones((128, 50, length)).cuda()\n log_mask = torch.rand((128, 50)).cuda().float()\n targets = torch.rand((128, )).cuda().long()\n uet_ids = torch.rand(128, 30, 16).cuda().long()\n 
uet_mask = torch.rand(128, 30).cuda().float()\n bing_ids = torch.LongTensor([]).cuda()\n bing_mask = torch.FloatTensor([]).cuda()\n print(model(input_ids, log_ids, log_mask, targets, uet_ids, uet_mask, bing_ids, bing_mask))\n"
] |
[
[
"torch.nn.Softmax",
"torch.mean",
"numpy.sqrt",
"torch.nn.functional.dropout",
"torch.cat",
"torch.sum",
"torch.nn.Embedding",
"torch.narrow",
"torch.FloatTensor",
"torch.nn.Embedding.from_pretrained",
"torch.nn.CrossEntropyLoss",
"torch.ones",
"torch.reshape",
"torch.from_numpy",
"torch.rand",
"torch.nn.functional.linear",
"torch.nn.Sequential",
"torch.LongTensor",
"torch.empty",
"torch.exp",
"torch.nn.Linear",
"torch.stack",
"torch.Tensor",
"torch.nn.Tanh",
"torch.matmul",
"numpy.random.normal",
"torch.nn.init.xavier_uniform_"
]
] |
nogawanogawa/image_search
|
[
"77367cecb27940891c3283ee40af1495c52b730e"
] |
[
"etl/main.py"
] |
[
"import os\nimport glob\nfrom metaflow import FlowSpec, step\nfrom lib.es.document import Document\nfrom lib.es.index import Index\nfrom step.extract.extract import *\nfrom lib.feature.src.embedding import *\nfrom lib.feature.src.phash import *\nfrom lib.feature.src.akaze import *\n\nimport torch\nfrom torchvision import models\nfrom torchvision.models import resnet34\nfrom torch import nn\nimport pickle\nfrom sklearn.cluster import KMeans\n\n\nmodel_name = '/app/model/kmeans_model.pkl'\n\nclass Workflow(FlowSpec):\n\n @step\n def start(self):\n self.next(self.initialize_index)\n\n @step\n def initialize_index(self):\n \"\"\"indexを削除し、初期のマッピングを作成\"\"\"\n index = Index()\n\n # indexの一覧を取得\n index_list = index.get_all()\n\n # 残っているindexの削除\n for i in index_list:\n res = index.delete(i)\n assert res[\"acknowledged\"] == True\n\n # mappingが存在するindexを作成\n path = os.getcwd()\n mapping_dir = os.path.join(path, \"lib/es/mapping\")\n mappings = glob.glob(os.path.join(mapping_dir, '*.json'))\n\n for mapping in mappings:\n filename = mapping.split(\"/\")[-1]\n index_name = filename.split(\".\")[0]\n res = index.create(index_name=index_name)\n assert res[\"acknowledged\"] == True\n\n self.next(self.build_model)\n\n @step\n def build_model(self):\n \"\"\" build kmeans model \"\"\"\n \n PATH = \"/app/images\"\n l = glob.glob(os.path.join(PATH, '*.jpg'))\n\n features = None\n for filepath in l:\n try:\n if features is None:\n features = get_akaze_feature(filepath)\n else :\n features = np.append(features, get_akaze_feature(filepath), axis=0)\n except:\n pass\n\n model = KMeans(n_clusters=20, init='k-means++', random_state=0).fit(features)\n pickle.dump(model, open(model_name, 'wb'))\n\n self.next(self.extract_load)\n\n @step\n def extract_load(self):\n \"\"\"dictの内容をESに挿入\"\"\"\n document = Document()\n\n PATH = \"/app/images\"\n l = glob.glob(os.path.join(PATH, '*.jpg'))\n dataset = ImageDataSet(PATH, l)\n dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False)\n\n model = models.resnet34(pretrained=True)\n model.fc = nn.Identity()\n\n kmeans_model = pickle.load(open(model_name, 'rb'))\n\n for i, (data, filepath) in enumerate(dataloader):\n\n with torch.no_grad():\n output = model(data)\n\n doc = {}\n\n filename = filepath[0].split(\"/\")[-1]\n\n doc[\"filename\"] = filename\n doc[\"phash\"] = str(get_hash(filepath[0]))\n doc[\"embedding\"] = output[0].tolist()\n\n d = {\n 0:0, 1:0, 2:0, 3:0, 4:0, \n 5:0, 6:0, 7:0, 8:0, 9:0, \n 10:0, 11:0, 12:0, 13:0, 14:0, \n 15:0, 16:0, 17:0, 18:0, 19:0 \n }\n\n try:\n features = get_akaze_feature(filepath[0])\n features = kmeans_model.predict(features)\n\n for f in features:\n d[f] = d[f] + 1\n\n except:\n pass\n \n doc.update(d)\n\n res = document.register(doc)\n #print(res)\n #print(filepath)\n\n self.next(self.end)\n\n @step\n def end(self):\n pass\n\n\nif __name__ == '__main__':\n Workflow()"
] |
[
[
"torch.no_grad",
"torch.nn.Identity",
"torch.utils.data.DataLoader",
"sklearn.cluster.KMeans"
]
] |
arokem/scipy
|
[
"4d15ee3e32d53a8bad00c0cf7d465ec27a5b876d"
] |
[
"scipy/fft/_pocketfft/tests/test_real_transforms.py"
] |
[
"from __future__ import division, print_function, absolute_import\n\nfrom os.path import join, dirname\n\nimport numpy as np\nfrom numpy.testing import (\n assert_array_almost_equal, assert_equal, assert_allclose)\nimport pytest\nfrom pytest import raises as assert_raises\n\nfrom scipy.fft._pocketfft.realtransforms import (\n dct, idct, dst, idst, dctn, idctn, dstn, idstn)\n\nfftpack_test_dir = join(dirname(__file__), '..', '..', '..', 'fftpack', 'tests')\n\nMDATA_COUNT = 8\nFFTWDATA_COUNT = 14\n\ndef is_longdouble_binary_compatible():\n try:\n one = np.frombuffer(\n b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80\\xff\\x3f\\x00\\x00\\x00\\x00\\x00\\x00',\n dtype='<f16')\n return one == np.longfloat(1.)\n except TypeError:\n return False\n\n\ndef get_reference_data():\n ref = getattr(globals(), '__reference_data', None)\n if ref is not None:\n return ref\n\n # Matlab reference data\n MDATA = np.load(join(fftpack_test_dir, 'test.npz'))\n X = [MDATA['x%d' % i] for i in range(MDATA_COUNT)]\n Y = [MDATA['y%d' % i] for i in range(MDATA_COUNT)]\n\n # FFTW reference data: the data are organized as follows:\n # * SIZES is an array containing all available sizes\n # * for every type (1, 2, 3, 4) and every size, the array dct_type_size\n # contains the output of the DCT applied to the input np.linspace(0, size-1,\n # size)\n FFTWDATA_DOUBLE = np.load(join(fftpack_test_dir, 'fftw_double_ref.npz'))\n FFTWDATA_SINGLE = np.load(join(fftpack_test_dir, 'fftw_single_ref.npz'))\n FFTWDATA_SIZES = FFTWDATA_DOUBLE['sizes']\n assert len(FFTWDATA_SIZES) == FFTWDATA_COUNT\n\n if is_longdouble_binary_compatible():\n FFTWDATA_LONGDOUBLE = np.load(\n join(fftpack_test_dir, 'fftw_longdouble_ref.npz'))\n else:\n FFTWDATA_LONGDOUBLE = {k: v.astype(np.longfloat)\n for k,v in FFTWDATA_DOUBLE.items()}\n\n ref = {\n 'FFTWDATA_LONGDOUBLE': FFTWDATA_LONGDOUBLE,\n 'FFTWDATA_DOUBLE': FFTWDATA_DOUBLE,\n 'FFTWDATA_SINGLE': FFTWDATA_SINGLE,\n 'FFTWDATA_SIZES': FFTWDATA_SIZES,\n 'X': X,\n 'Y': Y\n }\n\n globals()['__reference_data'] = ref\n return ref\n\n\[email protected](params=range(FFTWDATA_COUNT))\ndef fftwdata_size(request):\n return get_reference_data()['FFTWDATA_SIZES'][request.param]\n\[email protected](params=range(MDATA_COUNT))\ndef mdata_x(request):\n return get_reference_data()['X'][request.param]\n\n\[email protected](params=range(MDATA_COUNT))\ndef mdata_xy(request):\n ref = get_reference_data()\n y = ref['Y'][request.param]\n x = ref['X'][request.param]\n return x, y\n\n\ndef fftw_dct_ref(type, size, dt):\n x = np.linspace(0, size-1, size).astype(dt)\n dt = np.result_type(np.float32, dt)\n if dt == np.double:\n data = get_reference_data()['FFTWDATA_DOUBLE']\n elif dt == np.float32:\n data = get_reference_data()['FFTWDATA_SINGLE']\n elif dt == np.longfloat:\n data = get_reference_data()['FFTWDATA_LONGDOUBLE']\n else:\n raise ValueError()\n y = (data['dct_%d_%d' % (type, size)]).astype(dt)\n return x, y, dt\n\n\ndef fftw_dst_ref(type, size, dt):\n x = np.linspace(0, size-1, size).astype(dt)\n dt = np.result_type(np.float32, dt)\n if dt == np.double:\n data = get_reference_data()['FFTWDATA_DOUBLE']\n elif dt == np.float32:\n data = get_reference_data()['FFTWDATA_SINGLE']\n elif dt == np.longfloat:\n data = get_reference_data()['FFTWDATA_LONGDOUBLE']\n else:\n raise ValueError()\n y = (data['dst_%d_%d' % (type, size)]).astype(dt)\n return x, y, dt\n\n\ndef ref_2d(func, x, **kwargs):\n \"\"\"Calculate 2-D reference data from a 1d transform\"\"\"\n x = np.array(x, copy=True)\n for row in range(x.shape[0]):\n x[row, :] = 
func(x[row, :], **kwargs)\n for col in range(x.shape[1]):\n x[:, col] = func(x[:, col], **kwargs)\n return x\n\n\ndef naive_dct1(x, norm=None):\n \"\"\"Calculate textbook definition version of DCT-I.\"\"\"\n x = np.array(x, copy=True)\n N = len(x)\n M = N-1\n y = np.zeros(N)\n m0, m = 1, 2\n if norm == 'ortho':\n m0 = np.sqrt(1.0/M)\n m = np.sqrt(2.0/M)\n for k in range(N):\n for n in range(1, N-1):\n y[k] += m*x[n]*np.cos(np.pi*n*k/M)\n y[k] += m0 * x[0]\n y[k] += m0 * x[N-1] * (1 if k % 2 == 0 else -1)\n if norm == 'ortho':\n y[0] *= 1/np.sqrt(2)\n y[N-1] *= 1/np.sqrt(2)\n return y\n\n\ndef naive_dst1(x, norm=None):\n \"\"\"Calculate textbook definition version of DST-I.\"\"\"\n x = np.array(x, copy=True)\n N = len(x)\n M = N+1\n y = np.zeros(N)\n for k in range(N):\n for n in range(N):\n y[k] += 2*x[n]*np.sin(np.pi*(n+1.0)*(k+1.0)/M)\n if norm == 'ortho':\n y *= np.sqrt(0.5/M)\n return y\n\n\ndef naive_dct4(x, norm=None):\n \"\"\"Calculate textbook definition version of DCT-IV.\"\"\"\n x = np.array(x, copy=True)\n N = len(x)\n y = np.zeros(N)\n for k in range(N):\n for n in range(N):\n y[k] += x[n]*np.cos(np.pi*(n+0.5)*(k+0.5)/(N))\n if norm == 'ortho':\n y *= np.sqrt(2.0/N)\n else:\n y *= 2\n return y\n\n\ndef naive_dst4(x, norm=None):\n \"\"\"Calculate textbook definition version of DST-IV.\"\"\"\n x = np.array(x, copy=True)\n N = len(x)\n y = np.zeros(N)\n for k in range(N):\n for n in range(N):\n y[k] += x[n]*np.sin(np.pi*(n+0.5)*(k+0.5)/(N))\n if norm == 'ortho':\n y *= np.sqrt(2.0/N)\n else:\n y *= 2\n return y\n\n\[email protected]('dtype', [np.complex64, np.complex128, np.longcomplex])\[email protected]('transform', [dct, dst, idct, idst])\ndef test_complex(transform, dtype):\n y = transform(1j*np.arange(5, dtype=dtype))\n x = 1j*transform(np.arange(5))\n assert_array_almost_equal(x, y)\n\n\n# map (tranform, dtype, type) -> decimal\ndec_map = {\n # DCT\n (dct, np.double, 1): 13,\n (dct, np.float32, 1): 6,\n\n (dct, np.double, 2): 14,\n (dct, np.float32, 2): 5,\n\n (dct, np.double, 3): 14,\n (dct, np.float32, 3): 5,\n\n (dct, np.double, 4): 13,\n (dct, np.float32, 4): 6,\n\n # IDCT\n (idct, np.double, 1): 14,\n (idct, np.float32, 1): 6,\n\n (idct, np.double, 2): 14,\n (idct, np.float32, 2): 5,\n\n (idct, np.double, 3): 14,\n (idct, np.float32, 3): 5,\n\n (idct, np.double, 4): 14,\n (idct, np.float32, 4): 6,\n\n # DST\n (dst, np.double, 1): 13,\n (dst, np.float32, 1): 6,\n\n (dst, np.double, 2): 14,\n (dst, np.float32, 2): 6,\n\n (dst, np.double, 3): 14,\n (dst, np.float32, 3): 7,\n\n (dst, np.double, 4): 13,\n (dst, np.float32, 4): 6,\n\n # IDST\n (idst, np.double, 1): 14,\n (idst, np.float32, 1): 6,\n\n (idst, np.double, 2): 14,\n (idst, np.float32, 2): 6,\n\n (idst, np.double, 3): 14,\n (idst, np.float32, 3): 6,\n\n (idst, np.double, 4): 14,\n (idst, np.float32, 4): 6,\n}\n\nfor k,v in dec_map.copy().items():\n if k[1] == np.double:\n dec_map[(k[0], np.longdouble, k[2])] = v\n elif k[1] == np.float32:\n dec_map[(k[0], int, k[2])] = v\n\n\[email protected]('rdt', [np.longfloat, np.double, np.float32, int])\[email protected]('type', [1, 2, 3, 4])\nclass TestDCT:\n def test_definition(self, rdt, type, fftwdata_size):\n x, yr, dt = fftw_dct_ref(type, fftwdata_size, rdt)\n y = dct(x, type=type)\n assert_equal(y.dtype, dt)\n dec = dec_map[(dct, rdt, type)]\n assert_allclose(y, yr, rtol=0., atol=np.max(yr)*10**(-dec))\n\n @pytest.mark.parametrize('size', [7, 8, 9, 16, 32, 64])\n def test_axis(self, rdt, type, size):\n nt = 2\n dec = dec_map[(dct, rdt, type)]\n x = np.random.randn(nt, 
size)\n y = dct(x, type=type)\n for j in range(nt):\n assert_array_almost_equal(y[j], dct(x[j], type=type),\n decimal=dec)\n\n x = x.T\n y = dct(x, axis=0, type=type)\n for j in range(nt):\n assert_array_almost_equal(y[:,j], dct(x[:,j], type=type),\n decimal=dec)\n\n\[email protected]('rdt', [np.longfloat, np.double, np.float32, int])\ndef test_dct1_definition_ortho(rdt, mdata_x):\n # Test orthornomal mode.\n dec = dec_map[(dct, rdt, 1)]\n x = np.array(mdata_x, dtype=rdt)\n dt = np.result_type(np.float32, rdt)\n y = dct(x, norm='ortho', type=1)\n y2 = naive_dct1(x, norm='ortho')\n assert_equal(y.dtype, dt)\n assert_allclose(y, y2, rtol=0., atol=np.max(y2)*10**(-dec))\n\n\[email protected]('rdt', [np.longfloat, np.double, np.float32, int])\ndef test_dct2_definition_matlab(mdata_xy, rdt):\n # Test correspondence with matlab (orthornomal mode).\n dt = np.result_type(np.float32, rdt)\n x = np.array(mdata_xy[0], dtype=dt)\n\n yr = mdata_xy[1]\n y = dct(x, norm=\"ortho\", type=2)\n dec = dec_map[(dct, rdt, 2)]\n assert_equal(y.dtype, dt)\n assert_array_almost_equal(y, yr, decimal=dec)\n\n\[email protected]('rdt', [np.longfloat, np.double, np.float32, int])\ndef test_dct3_definition_ortho(mdata_x, rdt):\n # Test orthornomal mode.\n x = np.array(mdata_x, dtype=rdt)\n dt = np.result_type(np.float32, rdt)\n y = dct(x, norm='ortho', type=2)\n xi = dct(y, norm=\"ortho\", type=3)\n dec = dec_map[(dct, rdt, 3)]\n assert_equal(xi.dtype, dt)\n assert_array_almost_equal(xi, x, decimal=dec)\n\n\[email protected]('rdt', [np.longfloat, np.double, np.float32, int])\ndef test_dct4_definition_ortho(mdata_x, rdt):\n # Test orthornomal mode.\n x = np.array(mdata_x, dtype=rdt)\n dt = np.result_type(np.float32, rdt)\n y = dct(x, norm='ortho', type=4)\n y2 = naive_dct4(x, norm='ortho')\n dec = dec_map[(dct, rdt, 4)]\n assert_equal(y.dtype, dt)\n assert_allclose(y, y2, rtol=0., atol=np.max(y2)*10**(-dec))\n\n\[email protected]('rdt', [np.longfloat, np.double, np.float32, int])\[email protected]('type', [1, 2, 3, 4])\ndef test_idct_definition(fftwdata_size, rdt, type):\n xr, yr, dt = fftw_dct_ref(type, fftwdata_size, rdt)\n x = idct(yr, type=type)\n dec = dec_map[(idct, rdt, type)]\n assert_equal(x.dtype, dt)\n assert_allclose(x, xr, rtol=0., atol=np.max(xr)*10**(-dec))\n\n\[email protected]('rdt', [np.longfloat, np.double, np.float32, int])\[email protected]('type', [1, 2, 3, 4])\ndef test_definition(fftwdata_size, rdt, type):\n xr, yr, dt = fftw_dst_ref(type, fftwdata_size, rdt)\n y = dst(xr, type=type)\n dec = dec_map[(dst, rdt, type)]\n assert_equal(y.dtype, dt)\n assert_allclose(y, yr, rtol=0., atol=np.max(yr)*10**(-dec))\n\n\[email protected]('rdt', [np.longfloat, np.double, np.float32, int])\ndef test_dst1_definition_ortho(rdt, mdata_x):\n # Test orthornomal mode.\n dec = dec_map[(dst, rdt, 1)]\n x = np.array(mdata_x, dtype=rdt)\n dt = np.result_type(np.float32, rdt)\n y = dst(x, norm='ortho', type=1)\n y2 = naive_dst1(x, norm='ortho')\n assert_equal(y.dtype, dt)\n assert_allclose(y, y2, rtol=0., atol=np.max(y2)*10**(-dec))\n\n\[email protected]('rdt', [np.longfloat, np.double, np.float32, int])\ndef test_dst4_definition_ortho(rdt, mdata_x):\n # Test orthornomal mode.\n dec = dec_map[(dst, rdt, 4)]\n x = np.array(mdata_x, dtype=rdt)\n dt = np.result_type(np.float32, rdt)\n y = dst(x, norm='ortho', type=4)\n y2 = naive_dst4(x, norm='ortho')\n assert_equal(y.dtype, dt)\n assert_array_almost_equal(y, y2, decimal=dec)\n\n\[email protected]('rdt', [np.longfloat, np.double, np.float32, int])\[email protected]('type', 
[1, 2, 3, 4])\ndef test_idst_definition(fftwdata_size, rdt, type):\n xr, yr, dt = fftw_dst_ref(type, fftwdata_size, rdt)\n x = idst(yr, type=type)\n dec = dec_map[(idst, rdt, type)]\n assert_equal(x.dtype, dt)\n assert_allclose(x, xr, rtol=0., atol=np.max(xr)*10**(-dec))\n\n\[email protected]('routine', [dct, dst, idct, idst])\[email protected]('dtype', [np.float32, np.float64, np.longfloat])\[email protected]('shape, axis', [\n ((16,), -1), ((16, 2), 0), ((2, 16), 1)\n])\[email protected]('type', [1, 2, 3, 4])\[email protected]('overwrite_x', [True, False])\[email protected]('norm', [None, 'ortho'])\ndef test_overwrite(routine, dtype, shape, axis, type, norm, overwrite_x):\n # Check input overwrite behavior\n np.random.seed(1234)\n if np.issubdtype(dtype, np.complexfloating):\n x = np.random.randn(*shape) + 1j*np.random.randn(*shape)\n else:\n x = np.random.randn(*shape)\n x = x.astype(dtype)\n x2 = x.copy()\n routine(x2, type, None, axis, norm, overwrite_x=overwrite_x)\n\n sig = \"%s(%s%r, %r, axis=%r, overwrite_x=%r)\" % (\n routine.__name__, x.dtype, x.shape, None, axis, overwrite_x)\n if not overwrite_x:\n assert_equal(x2, x, err_msg=\"spurious overwrite in %s\" % sig)\n\n\nclass Test_DCTN_IDCTN(object):\n dec = 14\n dct_type = [1, 2, 3, 4]\n norms = [None, 'ortho']\n rstate = np.random.RandomState(1234)\n shape = (32, 16)\n data = rstate.randn(*shape)\n\n @pytest.mark.parametrize('fforward,finverse', [(dctn, idctn),\n (dstn, idstn)])\n @pytest.mark.parametrize('axes', [None,\n 1, (1,), [1],\n 0, (0,), [0],\n (0, 1), [0, 1],\n (-2, -1), [-2, -1]])\n @pytest.mark.parametrize('dct_type', dct_type)\n @pytest.mark.parametrize('norm', ['ortho'])\n def test_axes_round_trip(self, fforward, finverse, axes, dct_type, norm):\n tmp = fforward(self.data, type=dct_type, axes=axes, norm=norm)\n tmp = finverse(tmp, type=dct_type, axes=axes, norm=norm)\n assert_array_almost_equal(self.data, tmp, decimal=12)\n\n @pytest.mark.parametrize('funcn,func', [(dctn, dct), (dstn, dst)])\n @pytest.mark.parametrize('dct_type', dct_type)\n @pytest.mark.parametrize('norm', norms)\n def test_dctn_vs_2d_reference(self, funcn, func, dct_type, norm):\n y1 = funcn(self.data, type=dct_type, axes=None, norm=norm)\n y2 = ref_2d(func, self.data, type=dct_type, norm=norm)\n assert_array_almost_equal(y1, y2, decimal=11)\n\n @pytest.mark.parametrize('funcn,func', [(idctn, idct), (idstn, idst)])\n @pytest.mark.parametrize('dct_type', dct_type)\n @pytest.mark.parametrize('norm', [None, 'ortho'])\n def test_idctn_vs_2d_reference(self, funcn, func, dct_type, norm):\n fdata = dctn(self.data, type=dct_type, norm=norm)\n y1 = funcn(fdata, type=dct_type, norm=norm)\n y2 = ref_2d(func, fdata, type=dct_type, norm=norm)\n assert_array_almost_equal(y1, y2, decimal=11)\n\n @pytest.mark.parametrize('fforward,finverse', [(dctn, idctn),\n (dstn, idstn)])\n def test_axes_and_shape(self, fforward, finverse):\n with assert_raises(ValueError,\n match=\"when given, axes and shape arguments\"\n \" have to be of the same length\"):\n fforward(self.data, s=self.data.shape[0], axes=(0, 1))\n\n with assert_raises(ValueError,\n match=\"when given, axes and shape arguments\"\n \" have to be of the same length\"):\n fforward(self.data, s=self.data.shape, axes=0)\n\n @pytest.mark.parametrize('fforward', [dctn, dstn])\n def test_shape(self, fforward):\n tmp = fforward(self.data, s=(128, 128), axes=None)\n assert_equal(tmp.shape, (128, 128))\n\n @pytest.mark.parametrize('fforward,finverse', [(dctn, idctn),\n (dstn, idstn)])\n 
@pytest.mark.parametrize('axes', [1, (1,), [1],\n 0, (0,), [0]])\n def test_shape_is_none_with_axes(self, fforward, finverse, axes):\n tmp = fforward(self.data, s=None, axes=axes, norm='ortho')\n tmp = finverse(tmp, s=None, axes=axes, norm='ortho')\n assert_array_almost_equal(self.data, tmp, decimal=self.dec)\n"
] |
[
[
"numpy.sqrt",
"numpy.linspace",
"numpy.issubdtype",
"numpy.max",
"numpy.random.randn",
"scipy.fft._pocketfft.realtransforms.dst",
"numpy.testing.assert_equal",
"numpy.arange",
"numpy.sin",
"numpy.frombuffer",
"numpy.zeros",
"numpy.testing.assert_array_almost_equal",
"scipy.fft._pocketfft.realtransforms.dctn",
"scipy.fft._pocketfft.realtransforms.idct",
"numpy.array",
"numpy.random.RandomState",
"numpy.longfloat",
"scipy.fft._pocketfft.realtransforms.dct",
"scipy.fft._pocketfft.realtransforms.idst",
"numpy.random.seed",
"numpy.cos",
"numpy.result_type"
]
] |
bhsimon0810/text-classification
|
[
"01f303e7ac75341f9881a9f4af8d0a1d4c05c8aa"
] |
[
"textcnn/model.py"
] |
[
"import tensorflow as tf\r\nimport numpy as np\r\n\r\n\r\nclass TextCNN(object):\r\n \"\"\"\r\n A CNN for text classification.\r\n Uses an embedding layer, followed by a convolutional, max-pooling and softmax layer.\r\n \"\"\"\r\n\r\n def __init__(\r\n self, sequence_length, num_class, vocab_size,\r\n embedding_size, filter_sizes, num_filter, pretrained_embeddings, l2_reg_lambda=0.0):\r\n\r\n # placeholders\r\n self.inputs = tf.compat.v1.placeholder(tf.int32, [None, sequence_length], name=\"inputs\")\r\n self.labels = tf.compat.v1.placeholder(tf.int32, [None], name=\"labels\")\r\n self.dropout_keep_prob = tf.compat.v1.placeholder(tf.float32, name=\"dropout_keep_prob\")\r\n\r\n # embedding layer\r\n with tf.device('/cpu:0'), tf.compat.v1.variable_scope(\"embedding\"):\r\n self.embedding_matrix = tf.compat.v1.get_variable(\r\n name=\"embedding_matrix\",\r\n shape=[vocab_size, embedding_size],\r\n initializer=tf.constant_initializer(pretrained_embeddings),\r\n dtype=tf.float32)\r\n\r\n # with tf.device('/cpu:0'), tf.name_scope(\"embedding\"):\r\n # self.embedding_matrix = tf.Variable(\r\n # tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0),\r\n # name=\"W\")\r\n self.embedded_inputs = tf.expand_dims(tf.nn.embedding_lookup(self.embedding_matrix, self.inputs), -1)\r\n\r\n # create a convolution + maxpool layer for each filter size\r\n pooled_outputs = []\r\n for i, filter_size in enumerate(filter_sizes):\r\n with tf.compat.v1.name_scope(\"conv-maxpool-%s\" % filter_size):\r\n # Convolution Layer\r\n filter_shape = [filter_size, embedding_size, 1, num_filter]\r\n W = tf.compat.v1.Variable(tf.random.truncated_normal(filter_shape, stddev=0.1), name=\"W\")\r\n b = tf.compat.v1.Variable(tf.constant(0.1, shape=[num_filter]), name=\"b\")\r\n conv = tf.nn.conv2d(\r\n self.embedded_inputs,\r\n W,\r\n strides=[1, 1, 1, 1],\r\n padding=\"VALID\",\r\n name=\"conv\")\r\n # Apply nonlinearity\r\n h = tf.nn.relu(tf.nn.bias_add(conv, b), name=\"relu\")\r\n # Maxpooling over the outputs\r\n pooled = tf.nn.max_pool2d(\r\n h,\r\n ksize=[1, sequence_length - filter_size + 1, 1, 1],\r\n strides=[1, 1, 1, 1],\r\n padding='VALID',\r\n name=\"pool\")\r\n pooled_outputs.append(pooled)\r\n\r\n # combine all the pooled features\r\n num_filters_total = num_filter * len(filter_sizes)\r\n self.h_pool = tf.concat(pooled_outputs, 3)\r\n self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])\r\n\r\n # add dropout\r\n with tf.compat.v1.name_scope(\"dropout\"):\r\n self.h_drop = tf.nn.dropout(self.h_pool_flat, self.dropout_keep_prob)\r\n\r\n # final (unnormalized) scores and predictions\r\n with tf.compat.v1.name_scope(\"output\"):\r\n W = tf.compat.v1.get_variable(\r\n \"W\",\r\n shape=[num_filters_total, num_class],\r\n initializer=tf.contrib.layers.xavier_initializer())\r\n b = tf.Variable(tf.constant(0.1, shape=[num_class]), name=\"b\")\r\n self.logits = tf.compat.v1.nn.xw_plus_b(self.h_drop, W, b, name=\"logits\")\r\n self.predictions = tf.argmax(self.logits, 1, name=\"predictions\")\r\n\r\n # calculate mean cross-entropy loss\r\n with tf.name_scope(\"loss\"):\r\n labels = tf.one_hot(self.labels, depth=num_class)\r\n losses = tf.compat.v1.losses.softmax_cross_entropy(onehot_labels=labels, logits=self.logits)\r\n l2_reg = tf.add_n([tf.nn.l2_loss(v) for v in tf.compat.v1.trainable_variables()]) * l2_reg_lambda\r\n self.loss = tf.reduce_mean(losses) + l2_reg\r\n\r\n # accuracy\r\n with tf.name_scope(\"accuracy\"):\r\n correct_predictions = tf.equal(self.predictions, tf.argmax(labels, 1))\r\n self.accuracy 
= tf.reduce_mean(tf.cast(correct_predictions, \"float\"), name=\"accuracy\")\r\n"
] |
[
[
"tensorflow.device",
"tensorflow.concat",
"tensorflow.cast",
"tensorflow.nn.l2_loss",
"tensorflow.nn.conv2d",
"tensorflow.compat.v1.trainable_variables",
"tensorflow.name_scope",
"tensorflow.contrib.layers.xavier_initializer",
"tensorflow.argmax",
"tensorflow.compat.v1.variable_scope",
"tensorflow.nn.dropout",
"tensorflow.compat.v1.name_scope",
"tensorflow.random.truncated_normal",
"tensorflow.nn.max_pool2d",
"tensorflow.one_hot",
"tensorflow.compat.v1.losses.softmax_cross_entropy",
"tensorflow.nn.embedding_lookup",
"tensorflow.nn.bias_add",
"tensorflow.constant",
"tensorflow.reduce_mean",
"tensorflow.reshape",
"tensorflow.compat.v1.nn.xw_plus_b",
"tensorflow.constant_initializer",
"tensorflow.compat.v1.placeholder"
]
] |
DarioRugg/Self-Supervised_Pearson_Search
|
[
"731ff0888b336076ec42c26808417809fb78e3cf"
] |
[
"lib/datasets/cuhk_sysu.py"
] |
[
"import os.path as osp\nimport numpy as np\nimport torch\nfrom scipy.io import loadmat\nfrom sklearn.metrics import average_precision_score\n#from numba import jit\n\nfrom .ps_dataset import PersonSearchDataset\n#from ..utils.serialization import pickle, unpickle\nfrom ..utils.evaluator import _compute_iou\n\n\nclass CUHK_SYSU(PersonSearchDataset):\n\n def get_data_path(self):\n return osp.join(self.root, 'Image', 'SSM')\n\n def gt_roidb(self):\n cache_file = osp.join(self.root, 'cache',\n 'CUHK-SYSU_{}_gt_roidb.pkl'.format(self.mode))\n\n #if osp.isfile(cache_file):\n #roidb = unpickle(cache_file)\n # return roidb\n\n # Load all images and build a dict from image to boxes\n all_imgs = loadmat(\n osp.join(self.root, 'annotation', 'Images.mat'))\n all_imgs = all_imgs['Img'].squeeze()\n name_to_boxes = {}\n name_to_pids = {}\n for im_name, _, boxes in all_imgs:\n im_name = str(im_name[0])\n boxes = np.asarray([b[0] for b in boxes[0]])\n boxes = boxes.reshape(boxes.shape[0], 4) # (x1, y1, w, h)\n valid_index = np.where((boxes[:, 2] > 0) & (boxes[:, 3] > 0))[0]\n assert valid_index.size > 0, \\\n 'Warning: {} has no valid boxes.'.format(im_name)\n boxes = boxes[valid_index]\n name_to_boxes[im_name] = boxes.astype(np.int32)\n name_to_pids[im_name] = -1 * \\\n np.ones(boxes.shape[0], dtype=np.int32)\n\n def _set_box_pid(boxes, box, pids, pid):\n for i in range(boxes.shape[0]):\n if np.all(boxes[i] == box):\n pids[i] = pid\n return\n print('Warning: person {} box {} cannot find in Images'.format(pid, box))\n\n # Load all the train/probe/test persons and number their pids from 0 to N-1\n # Background people have pid == -1\n if self.mode == 'train':\n train = loadmat(osp.join(self.root,\n 'annotation/test/train_test/Train.mat'))\n train = train['Train'].squeeze()\n for index, item in enumerate(train):\n scenes = item[0, 0][2].squeeze()\n for im_name, box, __ in scenes:\n im_name = str(im_name[0])\n box = box.squeeze().astype(np.int32)\n _set_box_pid(name_to_boxes[im_name], box,\n name_to_pids[im_name], index)\n else:\n test = loadmat(osp.join(self.root,\n 'annotation/test/train_test/TestG50.mat'))\n test = test['TestG50'].squeeze()\n for index, item in enumerate(test):\n # query\n im_name = str(item['Query'][0, 0][0][0])\n box = item['Query'][0, 0][1].squeeze().astype(np.int32)\n _set_box_pid(name_to_boxes[im_name], box,\n name_to_pids[im_name], index)\n # gallery\n gallery = item['Gallery'].squeeze()\n for im_name, box, __ in gallery:\n im_name = str(im_name[0])\n if box.size == 0:\n break\n box = box.squeeze().astype(np.int32)\n _set_box_pid(name_to_boxes[im_name], box,\n name_to_pids[im_name], index)\n\n # Construct the gt_roidb\n gt_roidb = []\n for im_name in self.imgs:\n boxes = name_to_boxes[im_name]\n # is_hard = np.array([1 if h < 50.0 else 0 for h in boxes[:,3]])[:, np.newaxis]\n boxes[:, 2] += boxes[:, 0]\n boxes[:, 3] += boxes[:, 1] # (x1, y1, x2, y2)\n pids = name_to_pids[im_name]\n # num_objs = len(boxes)\n # overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)\n # overlaps[:, 1] = 1.0\n # overlaps = csr_matrix(overlaps) # scipy.sparse.csr_matrix\n gt_roidb.append({\n 'im_name': im_name,\n 'boxes': boxes,\n # 'gt_overlaps': overlaps,\n # 'gt_ishard': is_hard,\n 'gt_pids': pids,\n 'flipped': False})\n\n # pickle(gt_roidb, cache_file)\n # print('wrote gt roidb to {}'.format(cache_file))\n\n return gt_roidb\n\n def _load_image_set_index(self):\n \"\"\"\n Load the indexes for the specific subset (train / test).\n For PSDB, the index is just the image file name.\n \"\"\"\n # 
test pool\n test = loadmat(osp.join(self.root, 'annotation', 'pool.mat'))\n test = test['pool'].squeeze()\n test = [str(a[0]) for a in test]\n if self.mode in ('test', 'probe'):\n return test\n # all images\n all_imgs = loadmat(\n osp.join(self.root, 'annotation', 'Images.mat'))\n all_imgs = all_imgs['Img'].squeeze()\n all_imgs = [str(a[0][0]) for a in all_imgs]\n # training\n return list(set(all_imgs) - set(test))\n\n def _adapt_pid_to_cls(self, label_pids, upid=5555):\n \"\"\"\n convert pid range from (0, N-1) to (1, N), and replace -1 with unlabeled_person_identifier 5555\n \"\"\"\n label_pids += 1\n label_pids += (label_pids == 0).type(torch.int64) * upid\n return label_pids\n\n def load_probes(self):\n protoc = loadmat(osp.join(self.root,\n 'annotation/test/train_test/TestG50.mat'))['TestG50'].squeeze()\n probes = []\n for item in protoc['Query']:\n im_name = str(item['imname'][0, 0][0])\n roi = item['idlocate'][0, 0][0].astype(np.int32)\n roi[2:] += roi[:2]\n probes.append({'im_name': im_name,\n 'boxes': roi[np.newaxis, :],\n 'gt_classes': np.array([1]),\n # Useless. Can be set to any value.\n 'gt_pids': np.array([-100]),\n 'flipped': False})\n return probes\n\n @staticmethod\n # @jit(forceobj=True)\n def search_performance_calc(gallery_set, probe_set,\n gallery_det, gallery_feat, probe_feat,\n det_thresh=0.5, gallery_size=100):\n \"\"\"\n gallery_det (list of ndarray): n_det x [x1, x2, y1, y2, score] per image\n gallery_feat (list of ndarray): n_det x D features per image\n probe_feat (list of ndarray): D dimensional features per probe image\n\n det_thresh (float): filter out gallery detections whose scores below this\n gallery_size (int): gallery size [-1, 50, 100, 500, 1000, 2000, 4000]\n -1 for using full set\n \"\"\"\n assert len(gallery_set) == len(gallery_det)\n assert len(gallery_set) == len(gallery_feat)\n assert len(probe_set) == len(probe_feat)\n\n use_full_set = gallery_size == -1\n fname = 'TestG{}'.format(gallery_size if not use_full_set else 50)\n protoc = loadmat(osp.join(gallery_set.root, 'annotation/test/train_test',\n fname + '.mat'))[fname].squeeze()\n\n # mapping from gallery image to (det, feat)\n gt_roidb = gallery_set.record\n name_to_det_feat = {}\n for gt, det, feat in zip(gt_roidb, gallery_det, gallery_feat):\n name = gt['im_name']\n if det != []:\n scores = det[:, 4].ravel()\n inds = np.where(scores >= det_thresh)[0]\n if len(inds) > 0:\n gt_boxes = gt['boxes']\n det_boxes, reID_feat_det = det[inds], feat[inds],\n box_true = []\n num_gt, num_det = gt_boxes.shape[0], det_boxes.shape[0]\n\n # tag if detection is correct; could be skipped.\n ious = np.zeros((num_gt, num_det), dtype=np.float32)\n for i in xrange(num_gt):\n for j in xrange(num_det):\n ious[i, j] = _compute_iou(gt_boxes[i], det[j, :4])\n tfmat = (ious >= 0.5)\n # for each det, keep only the largest iou of all the gt\n for j in xrange(num_det):\n largest_ind = np.argmax(ious[:, j])\n for i in xrange(num_gt):\n if i != largest_ind:\n tfmat[i, j] = False\n # for each gt, keep only the largest iou of all the det\n for i in xrange(num_gt):\n largest_ind = np.argmax(ious[i, :])\n for j in xrange(num_det):\n if j != largest_ind:\n tfmat[i, j] = False\n for j in xrange(num_det):\n if tfmat[:, j].any():\n box_true.append(True)\n else:\n box_true.append(False)\n\n assert len(box_true) == len(det_boxes)\n name_to_det_feat[name] = (\n det_boxes, reID_feat_det, np.array(box_true))\n\n aps = []\n accs = []\n topk = [1, 5, 10]\n ret = {'image_root': gallery_set.data_path, 'results': []}\n for i in 
xrange(len(probe_set)):\n y_true, y_score, y_true_box = [], [], []\n imgs, rois = [], []\n count_gt, count_tp = 0, 0\n # Get L2-normalized feature vector\n feat_p = probe_feat[i].ravel()\n # Ignore the probe image\n probe_imname = str(protoc['Query'][i]['imname'][0, 0][0])\n probe_roi = protoc['Query'][i][\n 'idlocate'][0, 0][0].astype(np.int32)\n probe_roi[2:] += probe_roi[:2]\n probe_gt = []\n tested = set([probe_imname])\n # 1. Go through the gallery samples defined by the protocol\n for item in protoc['Gallery'][i].squeeze():\n gallery_imname = str(item[0][0])\n # some contain the probe (gt not empty), some not\n gt = item[1][0].astype(np.int32)\n count_gt += (gt.size > 0)\n # compute distance between probe and gallery dets\n if gallery_imname not in name_to_det_feat:\n continue\n det, feat_g, box_true = name_to_det_feat[gallery_imname]\n # get L2-normalized feature matrix NxD\n assert feat_g.size == np.prod(feat_g.shape[:2])\n feat_g = feat_g.reshape(feat_g.shape[:2])\n # compute cosine similarities\n sim = feat_g.dot(feat_p).ravel()\n # assign label for each det\n label = np.zeros(len(sim), dtype=np.int32)\n if gt.size > 0:\n w, h = gt[2], gt[3]\n gt[2:] += gt[:2]\n probe_gt.append({'img': str(gallery_imname),\n 'roi': map(float, list(gt))})\n iou_thresh = min(0.5, (w * h * 1.0) /\n ((w + 10) * (h + 10)))\n inds = np.argsort(sim)[::-1]\n sim = sim[inds]\n det = det[inds]\n box_true = box_true[inds]\n # only set the first matched det as true positive\n for j, roi in enumerate(det[:, :4]):\n if _compute_iou(roi, gt) >= iou_thresh:\n label[j] = 1\n count_tp += 1\n break\n y_true.extend(list(label))\n y_score.extend(list(sim))\n y_true_box.extend(list(box_true))\n imgs.extend([gallery_imname] * len(sim))\n rois.extend(list(det))\n tested.add(gallery_imname)\n # 2. Go through the remaining gallery images if using full set\n if use_full_set:\n for gallery_imname in gallery_set.imgs:\n if gallery_imname in tested:\n continue\n if gallery_imname not in name_to_det_feat:\n continue\n det, feat_g, box_true = name_to_det_feat[gallery_imname]\n # get L2-normalized feature matrix NxD\n assert feat_g.size == np.prod(feat_g.shape[:2])\n feat_g = feat_g.reshape(feat_g.shape[:2])\n # compute cosine similarities\n sim = feat_g.dot(feat_p).ravel()\n # guaranteed no target probe in these gallery images\n label = np.zeros(len(sim), dtype=np.int32)\n y_true.extend(list(label))\n y_score.extend(list(sim))\n y_true_box.extend(list(box_true))\n imgs.extend([gallery_imname] * len(sim))\n rois.extend(list(det))\n # 3. Compute AP for this probe (need to scale by recall rate)\n y_score = np.asarray(y_score)\n y_true = np.asarray(y_true)\n y_true_box = np.asarray(y_true_box)\n assert count_tp <= count_gt\n recall_rate = count_tp * 1.0 / count_gt\n ap = 0 if count_tp == 0 else \\\n average_precision_score(y_true, y_score) * recall_rate\n aps.append(ap)\n inds = np.argsort(y_score)[::-1]\n y_score = y_score[inds]\n y_true = y_true[inds]\n y_true_box = y_true_box[inds]\n accs.append([min(1, sum(y_true[:k])) for k in topk])\n # 4. 
Save result for JSON dump\n new_entry = {'probe_img': str(probe_imname),\n 'probe_roi': map(float, list(probe_roi)),\n 'probe_gt': probe_gt,\n 'gallery': []}\n # only save top-10 predictions\n for k in xrange(10):\n new_entry['gallery'].append({\n 'img': str(imgs[inds[k]]),\n 'roi': map(float, list(rois[inds[k]])),\n 'score': float(y_score[k]),\n 'correct': int(y_true[k]),\n 'det_correct': int(y_true_box[k]),\n })\n ret['results'].append(new_entry)\n\n print('search ranking:')\n print(' mAP = {:.2%}'.format(np.mean(aps)))\n accs = np.mean(accs, axis=0)\n for i, k in enumerate(topk):\n print(' top-{:2d} = {:.2%}'.format(k, accs[i]))\n\n ret['mAP'] = np.mean(aps)\n ret['accs'] = accs\n\n return ret\n"
] |
[
[
"numpy.asarray",
"numpy.ones",
"numpy.all",
"numpy.argmax",
"numpy.mean",
"sklearn.metrics.average_precision_score",
"numpy.prod",
"numpy.argsort",
"numpy.array",
"numpy.where",
"numpy.zeros"
]
] |
t9s9/BeeMeter
|
[
"d0dfbf621a9147c047708a18540ba61266324176",
"d0dfbf621a9147c047708a18540ba61266324176"
] |
[
"detection/dataset/data_analysis.py",
"jetson_nano/model.py"
] |
[
"import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom sklearn.cluster import KMeans\nfrom tqdm import tqdm\n\nfrom detection.dataset.clustering import avg_iou, kmeans_iou\nfrom detection.dataset.dataset_generator import Dataset\n\n\nclass Analyser:\n def __init__(self, model_input_size, dataset):\n\n self.img_size = dataset.img_width, dataset.img_height\n self.input_size = model_input_size\n self.labels = np.concatenate(dataset.labels).astype(dtype=np.float64)\n\n self.rescale_labels()\n\n def rescale_labels(self):\n # scale the predicted boxes to the size of the models input images\n self.labels[..., [0, 2]] = self.labels[..., [0, 2]] * (self.input_size[0] / self.img_size[0])\n self.labels[..., [1, 3]] = self.labels[..., [1, 3]] * (self.input_size[1] / self.img_size[1])\n\n def to_relative(self):\n self.labels[..., [0, 2]] /= self.img_size[0]\n self.labels[..., [1, 3]] /= self.img_size[1]\n\n def get_sides(self):\n \"\"\"\n\n :return: Shape: (#boxes, 2) with last axis: (width, height)\n \"\"\"\n side_len = self.labels[..., [2, 3]] - self.labels[..., [0, 1]]\n return side_len\n\n def get_ratios(self):\n \"\"\"\n\n :return: Shape: (#boxes, )\n \"\"\"\n sides = self.get_sides()\n return sides[..., 0] / sides[..., 1]\n\n def get_centers(self):\n return (self.labels[..., [0, 1]] + self.labels[..., [2, 3]]) / 2\n\n def ratio_histo(self):\n # ration = width / height\n ratios = self.get_ratios()\n plt.hist(ratios, bins=80, alpha=0.5)\n\n mean = np.mean(ratios)\n std = np.std(ratios)\n print(\"Mean: {0:.2f} Std: {1:.2f}\".format(mean, std))\n\n plt.axvline(mean, color='r', linestyle='--')\n plt.axvline(mean + std, color='g', linestyle='--')\n plt.axvline(mean - std, color='g', linestyle='--')\n plt.xlabel('Seitenverhältnis (Breite / Höhe)')\n plt.ylabel('Anzahl Objekte')\n plt.xlim(0.0, 2.0)\n plt.show()\n\n def ratio_hist2d(self):\n sides = self.get_sides()\n min_w, max_w = np.min(sides[..., 0]), np.max(sides[..., 0])\n min_h, max_h = np.min(sides[..., 1]), np.max(sides[..., 1])\n hist = plt.hist2d(x=sides[..., 0], y=sides[..., 1], bins=min(max_w - min_w, max_h - min_h), cmap='cividis')\n plt.xticks(hist[1], range(min_w, max_w + 1, 1))\n plt.yticks(hist[2], range(min_h, max_h + 1, 1))\n plt.xlabel('width')\n plt.ylabel('height')\n cb = plt.colorbar(hist[3])\n cb.set_label('count')\n plt.show()\n\n def scatter_ratio(self, centers, labels=None, name='kMeans', show=True, save=True):\n sides = self.get_sides()\n fig, ax = plt.subplots(figsize=(8, 8))\n ax.scatter(sides[:, 0], sides[:, 1], c=labels, s=50, cmap='viridis', alpha=0.5)\n ax.scatter(centers[:, 0], centers[:, 1], marker='x', s=180, linewidths=4,\n color='w', zorder=10)\n ax.set_xlabel('Boxbreite [px]')\n ax.set_ylabel('Boxhöhe [px]')\n ax.set_xlim((0, np.max(sides)))\n ax.set_ylim((0, np.max(sides)))\n if show:\n plt.show()\n if save:\n fig.savefig(name)\n\n def ratio_euclid_means(self, c=3, show=False, verbose=True, save=False, tries=10):\n sides = self.get_sides()\n best = [0.0, None, None]\n\n for _ in tqdm(range(tries), desc='Groups: {0}'.format(c)):\n kmeans = KMeans(n_clusters=c, verbose=0, random_state=None)\n res = kmeans.fit(sides)\n centers = res.cluster_centers_\n labels = res.labels_\n accuracy = avg_iou(sides, centers)\n if accuracy > best[0]:\n best = [accuracy, centers, labels]\n centers, labels = best[1], best[2]\n ratios = np.around(centers[:, 0] / centers[:, 1], decimals=3).tolist()\n ratios = sorted(ratios)\n\n if verbose:\n ratios = centers[:, 0] / centers[:, 1]\n 
print(\"Accuracy: {:.2f}%\".format(avg_iou(sides, centers) * 100))\n print(\"Boxes:\\n {}\".format(centers))\n print(\"Ratios:\\n {}\".format([round(i, 2) for i in sorted(ratios)]))\n if show or save:\n self.scatter_ratio(centers, labels=labels, show=show, save=save,\n name='kMeans_{0}_{1:.3f}.png'.format(c, best[0]))\n\n return {'groups': c, 'dist': 'euclid', 'accuracy': best[0], 'rations': ratios}\n\n def ratio_iou_means(self, c=5, show=False, save=False, verbose=True, dist=np.median, tries=10):\n sides = self.get_sides()\n best = [0.0, None, None]\n for _ in tqdm(range(tries), desc='Groups: {0}'.format(c)):\n centers, labels = kmeans_iou(sides, k=c, dist=dist)\n accuracy = avg_iou(sides, centers)\n if accuracy > best[0]:\n best = [accuracy, centers, labels]\n centers, labels = best[1], best[2]\n ratios = np.around(centers[:, 0] / centers[:, 1], decimals=3).tolist()\n ratios = sorted(ratios)\n\n str_dist = 'mean' if dist == np.mean else 'median'\n if verbose:\n print(\"Accuracy: {:.2f}%\".format(best[0] * 100))\n print(\"Boxes:\\n {}\".format(centers))\n print(\"Boxes norm:\\n {}\".format(centers / 240))\n print(\"Ratios:\\n {}\".format(ratios))\n if show or save:\n self.scatter_ratio(centers, labels=labels, show=show, save=save,\n name='iou_kMeans_{0}_{1:.3f}_{2}.png'.format(c, best[0], str_dist))\n return {'groups': c, 'dist': str_dist, 'accuracy': best[0], 'rations': ratios}\n\n def test_kmeans_iou(self, cmin, cmax, tries=10, dist=np.median, save=True):\n collection = []\n for c in tqdm(range(cmin, cmax + 1), desc='Trying different group sizes.'):\n result = self.ratio_iou_means(c=c, show=False, save=save, verbose=False, dist=dist, tries=tries)\n collection.append(result)\n return collection\n\n def test_kmeans_eucl(self, cmin, cmax, tries=10, save=True):\n collection = []\n for c in tqdm(range(cmin, cmax + 1), desc='Trying different group sizes.'):\n result = self.ratio_euclid_means(c=c, show=False, save=save, verbose=False, tries=tries)\n collection.append(result)\n return collection\n\n def ratio_join(self):\n sides = self.get_sides().astype(np.float)\n a = sns.jointplot(sides[:, 0], sides[:, 1])\n plt.show()\n\n def scales(self):\n scale_relative_side = min(self.input_size)\n sides = self.get_sides()\n sns.set_theme(style=\"whitegrid\")\n print(sides.shape)\n scale = sides / scale_relative_side\n df_x = pd.DataFrame(scale[:, 0], columns=[\"value\"])\n df_x[\"axis\"] = \"x\"\n df_y = pd.DataFrame(scale[:, 1], columns=[\"value\"])\n df_y[\"axis\"] = \"y\"\n df = pd.concat([df_x, df_y])\n df.to_csv(\"scale_data.csv\", index=False)\n sns.displot(data=df, x=\"value\", hue=\"axis\", palette='dark', alpha=0.8, kind=\"hist\", stat='density')\n plt.xlabel('Relative Boxgröße')\n plt.ylabel('Dichte')\n plt.tight_layout()\n # plt.gcf().savefig('scale_histogram.png')\n plt.show()\n\n def box_distribution(self, mode='scatter'):\n centers = self.get_centers()\n\n if mode == 'scatter':\n plt.scatter(centers[:, 0], centers[:, 1], s=5, alpha=0.4)\n elif mode == 'heatmap':\n bins = 30\n heatmap, xedges, yedges = np.histogram2d(centers[:, 0], centers[:, 1],\n bins=(self.img_size[0] // bins, self.img_size[1] // bins))\n extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]\n plt.imshow(heatmap.T, extent=extent, origin='upper')\n elif mode == \"join\":\n jn = sns.jointplot(centers[:, 0], centers[:, 1], ratio=5, space=0.1, kind=\"scatter\",\n xlim=(-10, self.input_size[0]), ylim=(-10, self.input_size[1]),\n joint_kws={'alpha': 0.1})\n jn.fig.set_figwidth(8)\n jn.fig.set_figheight(5)\n 
jn.ax_joint.set_xlabel(\"Width\")\n jn.ax_joint.set_ylabel(\"Height\")\n plt.gca().invert_yaxis()\n plt.show()\n\n def scale_values(self, clusters=10):\n sides = np.array(list(map(lambda label: self.get_side_len(label, relative=True), self.labels)))\n sides_reshaped = sides.flatten().reshape((-1, 1))\n k_means = KMeans(n_clusters=clusters, random_state=0, verbose=0).fit(sides_reshaped)\n y_means = k_means.predict(sides_reshaped)\n centers = k_means.cluster_centers_.reshape(clusters)\n v, y = np.unique(y_means, return_counts=True)\n sorted_centers = np.round(np.sort(centers), decimals=3)\n sorted_y = y[centers.argsort()]\n # heatmap([sorted_y], xticklabels=sorted_centers, square=True, cbar_kws={\"orientation\": \"horizontal\"})\n plt.plot(sorted_centers, sorted_y)\n plt.xlabel(\"Bounding box scales\")\n plt.title(\"Bbox cluster heatmap\")\n plt.yticks([])\n plt.show()\n\n\nif __name__ == '__main__':\n base_dir = \"/home/t9s9/PycharmProjects/BeeMeter/data/training/\"\n dataset = Dataset.from_sqlite(images_dirs=base_dir + \"base_training_img\", path=base_dir + \"base_labels.db\",\n verbose=False) + \\\n Dataset.from_sqlite(images_dirs=base_dir + \"gopro_training_img\",\n path=base_dir + \"gopro_labels.db\", verbose=False)\n\n a = Analyser((400, 200), dataset)\n # a.ratio_iou_means(c=3, tries=10, show=True, save=False, verbose=True)\n a.scales()\n\n\n def ratios_compare():\n df = pd.read_csv('ratios.csv')\n for i in list(df.groupby(by='dist')):\n plt.plot(i[1]['groups'], i[1]['accuracy'], label=i[0])\n fig = plt.gcf()\n fig.set_size_inches(11, 8)\n plt.legend()\n plt.xlim([1, 8])\n plt.xlabel(\"Anzahl der Gruppen\")\n plt.ylabel(\"Genauigkeit\")\n plt.show()\n fig.savefig('compare_ratio_cluster.png')\n",
"import time\nimport numpy as np\nfrom pathlib import Path\nimport cv2\n\nimport tensorflow as tf\nfrom tensorflow.python.saved_model import tag_constants, signature_constants\nfrom tensorflow.python.compiler.tensorrt import trt_convert as trt\n\nfrom detection.default_boxes.default_boxes import DefaultBoxHandler\nfrom detection.dataset.dataset_generator import Dataset\nfrom detection.model.evaluation import Evaluator\n\ntf.keras.backend.clear_session()\ntf.keras.backend.set_learning_phase(0)\n\ngpus = tf.config.experimental.list_physical_devices('GPU')\ntf.config.experimental.set_memory_growth(gpus[0], True)\n\n\nclass TenorRTSSD:\n def __init__(self, model=None, box_handler=None):\n tf.keras.backend.clear_session()\n self.model = model\n self.box_handler = box_handler\n self.width, self.height = 400, 200\n\n @classmethod\n def from_config(cls, model_path, config_path):\n saved_model = tf.saved_model.load(model_path, tags=[tag_constants.SERVING])\n print(\"Model loaded.\")\n model = saved_model.signatures[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]\n print(\"Model signature.\")\n\n box_handler = DefaultBoxHandler.from_config(config_path)\n print(\"Boxes created.\")\n return cls(model=model, box_handler=box_handler)\n\n def predict(self, x, conf_threshold=0.5, nms_threshold=0.3, sigma=0.5, nms_methode='normal'):\n x = tf.constant(x, dtype=tf.float32)\n x = tf.expand_dims(x, axis=0)\n prediction = self.model(x)\n prediction = prediction['box_and_class'].numpy()\n prediction = self.box_handler.faster_decode_default_boxes(prediction, confidence_threshold=conf_threshold,\n nms_threshold=nms_threshold, sigma=sigma,\n nms_methode=nms_methode)\n return prediction\n\n def benchmark(self, warmup=200, runs=800):\n \"\"\"\n Runs dummy data through the network to get a benchmark of the prediction.\n :param warmup:\n :param runs:\n :return:\n \"\"\"\n elapsed_time = []\n img = tf.constant(np.random.normal(size=(1, 200, 400, 3)).astype(np.float32))\n print(\"Data created.\")\n\n for i in range(warmup):\n output = self.model(img)\n print(\"Finished warmup.\")\n\n for i in range(1, runs + 1):\n img = tf.constant(np.random.normal(size=(1, 200, 400, 3)).astype(np.float32))\n start_time = time.time()\n output = self.model(img)\n end_time = time.time()\n elapsed_time = np.append(elapsed_time, end_time - start_time)\n if i % 50 == 0:\n print('Step {}: {:4.1f}ms'.format(i, (elapsed_time[-50:].mean()) * 1000))\n print('Throughput: {0:.2f} ms/image {1:.0f} FPS'.format(elapsed_time.mean() * 1000,\n runs * 1 / elapsed_time.sum()))\n\n def evaluate(self, image_path, labels_path, iou_threshold=0.5):\n result = []\n\n ds = Dataset.from_sqlite(image_path, labels_path)\n for path in ds.filenames:\n img = cv2.imread(path)\n img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img_resize = cv2.resize(img_rgb, (self.width, self.height))\n prediction = self.predict(img_resize, conf_threshold=0.01)[0]\n\n prediction[..., [1, 3]] = np.round(prediction[..., [1, 3]] * (img.shape[1] / self.width), decimals=0)\n prediction[..., [2, 4]] = np.round(prediction[..., [2, 4]] * (img.shape[0] / self.height), decimals=0)\n result.append(prediction)\n\n assigned_prediction = Evaluator.predictions2labels(result, ds.labels, iou_threshold=iou_threshold)\n precision, recall = Evaluator.precision_at_recall(assigned_prediction, ds.nr_labels)\n mAP = Evaluator.calc_average_precision(precision, recall)\n print(\"mAP: {0:.3f}\".format(mAP))\n return mAP\n\n\nif __name__ == '__main__':\n m = 
TenorRTSSD.from_config(model_path='../resources/demo/tensorRT_FP16',\n config_path=\"../resources/demo/model_config.conf\")\n m.benchmark()\n"
] |
[
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.imshow",
"sklearn.cluster.KMeans",
"numpy.around",
"pandas.DataFrame",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.concatenate",
"numpy.mean",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.tight_layout",
"pandas.read_csv",
"numpy.unique",
"matplotlib.pyplot.gcf",
"numpy.std",
"pandas.concat",
"matplotlib.pyplot.title",
"numpy.min",
"matplotlib.pyplot.show",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.ylabel",
"numpy.histogram2d",
"matplotlib.pyplot.axvline",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.subplots",
"numpy.sort",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.yticks"
],
[
"tensorflow.constant",
"tensorflow.saved_model.load",
"tensorflow.config.experimental.set_memory_growth",
"tensorflow.config.experimental.list_physical_devices",
"tensorflow.expand_dims",
"numpy.round",
"numpy.append",
"numpy.random.normal",
"tensorflow.keras.backend.clear_session",
"tensorflow.keras.backend.set_learning_phase"
]
] |
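The first file in the row above clusters bounding-box sides to pick anchor aspect ratios. A minimal standalone sketch of that idea, using scikit-learn's `KMeans` on synthetic (width, height) pairs — the data, cluster count and the corner-aligned IoU helper are assumptions for illustration, not the repository's code:

```python
# Minimal sketch, not the repository's code: cluster synthetic (w, h) box
# sides with KMeans and report width/height ratios plus the average best
# IoU of each box against its nearest cluster centre.
import numpy as np
from sklearn.cluster import KMeans

def corner_aligned_iou(sides, centers):
    # IoU of boxes that share a common corner, the usual anchor-clustering metric.
    w = np.minimum(sides[:, None, 0], centers[None, :, 0])
    h = np.minimum(sides[:, None, 1], centers[None, :, 1])
    inter = w * h
    union = (sides[:, 0] * sides[:, 1])[:, None] + (centers[:, 0] * centers[:, 1])[None, :] - inter
    return inter / union

rng = np.random.default_rng(0)
sides = rng.uniform(10, 120, size=(500, 2))              # placeholder (w, h) pairs
centers = KMeans(n_clusters=5, n_init=10, random_state=0).fit(sides).cluster_centers_
accuracy = corner_aligned_iou(sides, centers).max(axis=1).mean()
ratios = sorted(np.round(centers[:, 0] / centers[:, 1], 3).tolist())
print("Accuracy: {:.2f}%".format(accuracy * 100), "Ratios:", ratios)
```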
tegg89/magnet
|
[
"740160aa1ae367d22eeffcd04268adac52ab9108"
] |
[
"env_processing/shaping.py"
] |
[
"import numpy as np\n\nREWARD_FOR_KILLING = 100\nREWARD_FOR_KILLING_YOURSELF = -100\nREWARD_FOR_INCREASING_CAN_KICK = 10\nREWARD_FOR_INCREASING_AMMO = 10\nREWARD_FOR_INCREASING_BLAST_POWER = 10\nREWARD_FOR_DESTROING_WOODEN_WALL = 20\n\n# contain dictionary which describe current bombs on field\nlist_of_sest_boobms = [[], [], [], []]\n# contain dictionary of dead agent, coordinates of bombs which kill them and flag 'was dead on privius step'\nkilled = {}\n# dictionary of coordinate_of_adgent\ncoordinate_of_adgent = {0: [0, 0],\n 1: [0, 0],\n 2: [0, 0],\n 3: [0, 0]}\n\n\ndef init_list_of_vertex(board):\n vertex_name = {} # dictionary of coordinate (converted to x*11 + y, except agents: they alwas stay in 121, 122,\n # 123 and 124 + converted position) to type of vertex\n vertex_list = [] # list of existed vertex in current state\n for i in range(11):\n for j in range(11):\n if board[i, j] == 2:\n vertex_name[i * 11 + j] = 'wooden wall'\n vertex_list.append(i * 11 + j)\n if board[i, j] == 3:\n vertex_name[i * 11 + j] = 'bomb'\n vertex_list.append(i * 11 + j)\n if board[i, j] == 4:\n vertex_name[i * 11 + j] = 'flames'\n vertex_list.append(i * 11 + j)\n if board[i, j] == 5:\n vertex_name[i * 11 + j] = 'fog'\n vertex_list.append(i * 11 + j)\n if board[i, j] == 6:\n vertex_name[i * 11 + j] = 'extra bomb'\n vertex_list.append(i * 11 + j)\n if board[i, j] == 7:\n vertex_name[i * 11 + j] = 'increase range'\n vertex_list.append(i * 11 + j)\n if board[i, j] == 8:\n vertex_name[i * 11 + j] = 'can kick Power-Up'\n vertex_list.append(i * 11 + j)\n if board[i, j] == 10:\n vertex_name[121] = 0\n vertex_list.append(121)\n if board[i, j] == 11:\n vertex_name[122] = 1\n vertex_list.append(122)\n if board[i, j] == 12:\n vertex_name[123] = 2\n vertex_list.append(123)\n if board[i, j] == 13:\n vertex_name[124] = 3\n vertex_list.append(124)\n return vertex_name, vertex_list\n\n\ndef check_next_to_bomb(graph, agent_num, current_state, privius_vertex_name, reward):\n print(agent_num)\n agent_num = int(agent_num)\n for bomb in list_of_sest_boobms[agent_num]:\n bomb['blast strength'] = current_state[bomb['x'] + 23, bomb['y']]\n bomb['life'] = current_state[bomb['x'] + 1, bomb['y']]\n if (int(bomb['life']) == 1):\n # booom\n for key, val in privius_vertex_name.items():\n item_x = key // 11\n item_y = key % 11\n # if it is an agent -- watch coordinet in coordinate_of_adgent\n if val in [0, 1, 2, 3]:\n item_x, item_y = coordinate_of_adgent[val]\n\n # check is this item will be killed by bomb\n if ((bomb['x'] - bomb['blast strength'] < item_x < bomb['x'] + bomb['blast strength']) and (\n item_y == bomb['y'])) or \\\n ((bomb['y'] - bomb['blast strength'] < item_y < bomb['y'] + bomb['blast strength']) and (\n item_x == bomb['x'])):\n\n # kill someone\n if val in [0, 1, 2, 3] and val != agent_num:\n print(agent_num, \" kill \", val)\n killed[val] = {'x': bomb['x'],\n 'y': bomb['y'],\n 'was dead on previous step': False\n }\n reward += REWARD_FOR_KILLING\n # learn that agent, which was killed should avoid this bomb\n graph[val, int(bomb['x']) * 11 + int(bomb['y'])] = -100\n\n # kill yourself\n if val == agent_num:\n print(agent_num, \" kill itself\")\n killed[val] = {'x': bomb['x'],\n 'y': bomb['y'],\n 'was dead on previous step': False\n }\n reward += REWARD_FOR_KILLING_YOURSELF\n\n # learn that agent, which was killed should avoid this bomb\n graph[val, int(bomb['x']) * 11 + int(bomb['y'])] = -100\n\n # destroy wooden wall\n if val == 'wooden wall':\n print(agent_num, \" destroyed wooden wall\")\n reward += 
REWARD_FOR_DESTROING_WOODEN_WALL\n # delete bomb after booom\n list_of_sest_boobms[agent_num].remove(bomb)\n\n return graph, reward\n\n\ndef reward_shaping(graph, curr_state, prev_state, agent_num):\n coordinate_of_adgent[agent_num] = (int(curr_state[0, 0]), int(curr_state[0, 1]))\n\n prev_state = np.asmatrix(prev_state).reshape(38, 11)\n curr_state = np.asmatrix(curr_state).reshape(38, 11)\n\n curr_vertex_name, curr_vertex_list = init_list_of_vertex(curr_state[12:23])\n prev_vertex_name, prev_vertex_list = init_list_of_vertex(prev_state[12:23])\n\n prev_x = int(prev_state[0, 0])\n prev_y = int(prev_state[0, 1])\n\n reward = 0\n\n # on privius state agent lay bomb\n if prev_state[37, 0] == 5:\n for key, val in prev_vertex_name.items():\n if val == 'bomb': # consider all setted bombs\n bomb_x = key // 11\n bomb_y = key % 11\n # check if that bomb was next to adgent in privius state\n if abs(bomb_x - prev_x) + abs(bomb_y - prev_y) == 1:\n list_of_sest_boobms[agent_num].append({'x': bomb_x,\n 'y': bomb_y,\n 'life': curr_state[bomb_x + 1, bomb_y],\n 'blast strength': curr_state[bomb_x + 23, bomb_y]})\n # add list of [x, y, bomb life, boobm blast strength]\n\n # increase can kick\n if prev_state[34, 0] < curr_state[34, 0]:\n print(agent_num, \" increase can kick\")\n graph[agent_num, prev_x * 11 + prev_y] = 10 # set edge between can kick power up and adjent as 10\n reward += REWARD_FOR_INCREASING_CAN_KICK\n\n # increase ammo\n if prev_state[35, 0] < curr_state[35, 0]:\n print(agent_num, \" increase ammo\")\n graph[agent_num, (prev_x * 11 + prev_y) % 120] = 10 # set edge between increase ammo power up and adjent as 10\n reward += REWARD_FOR_INCREASING_AMMO\n\n # increase blast power\n if prev_state[36, 0] < curr_state[36, 0]:\n print(agent_num, \" increase blast power\")\n graph[agent_num, prev_x * 11 + prev_y] = 10 # set edge between blast power power up and adjent as 10\n reward += REWARD_FOR_INCREASING_BLAST_POWER\n\n graph, reward = check_next_to_bomb(graph, agent_num, curr_state, prev_vertex_name, reward)\n\n # has died\n if agent_num in killed and not killed[agent_num]['was dead on previous step']:\n print(agent_num, \" has dead\")\n killed[agent_num]['was dead on previous step'] = 'True'\n return graph\n\n return graph.astype(\"float32\"), reward\n"
] |
[
[
"numpy.asmatrix"
]
] |
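The shaping code above compares consecutive flattened observations to hand out rewards. A minimal sketch of the reshape-and-compare pattern it relies on, mirroring the `prev_state[35, 0] < curr_state[35, 0]` ammo check; the board layout index is taken from that snippet, everything else is a placeholder:

```python
# Minimal sketch (placeholder data, not the repository's API): reshape a
# flattened 38x11 observation and reward an increase of the ammo counter.
import numpy as np

REWARD_FOR_INCREASING_AMMO = 10

def ammo_reward(prev_obs, curr_obs):
    prev = np.asarray(prev_obs, dtype=float).reshape(38, 11)
    curr = np.asarray(curr_obs, dtype=float).reshape(38, 11)
    return REWARD_FOR_INCREASING_AMMO if curr[35, 0] > prev[35, 0] else 0

prev = np.zeros(38 * 11)
curr = np.zeros(38 * 11)
curr[35 * 11] = 1                 # flat index of row 35, column 0: ammo went up
print(ammo_reward(prev, curr))    # -> 10
```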
timothijoe/DI-engine
|
[
"f0014586f7043334b5ce78a62c824be90ee1c2d9"
] |
[
"ding/entry/tests/test_serial_entry.py"
] |
[
"import pytest\nimport time\nimport os\nfrom copy import deepcopy\n\nfrom ding.entry import serial_pipeline, collect_demo_data, serial_pipeline_offline\nfrom dizoo.classic_control.cartpole.config.cartpole_dqn_config import cartpole_dqn_config, cartpole_dqn_create_config\nfrom dizoo.classic_control.cartpole.config.cartpole_ppo_config import cartpole_ppo_config, cartpole_ppo_create_config\nfrom dizoo.classic_control.cartpole.config.cartpole_ppo_offpolicy_config import cartpole_ppo_offpolicy_config, \\\n cartpole_ppo_offpolicy_create_config\nfrom dizoo.classic_control.cartpole.config.cartpole_impala_config import cartpole_impala_config, cartpole_impala_create_config # noqa\nfrom dizoo.classic_control.cartpole.config.cartpole_rainbow_config import cartpole_rainbow_config, cartpole_rainbow_create_config # noqa\nfrom dizoo.classic_control.cartpole.config.cartpole_iqn_config import cartpole_iqn_config, cartpole_iqn_create_config # noqa\nfrom dizoo.classic_control.cartpole.config.cartpole_c51_config import cartpole_c51_config, cartpole_c51_create_config # noqa\nfrom dizoo.classic_control.cartpole.config.cartpole_qrdqn_config import cartpole_qrdqn_config, cartpole_qrdqn_create_config # noqa\nfrom dizoo.classic_control.cartpole.config.cartpole_sqn_config import cartpole_sqn_config, cartpole_sqn_create_config # noqa\nfrom dizoo.classic_control.cartpole.config.cartpole_ppg_config import cartpole_ppg_config, cartpole_ppg_create_config # noqa\nfrom dizoo.classic_control.cartpole.config.cartpole_acer_config import cartpole_acer_config, cartpole_acer_create_config # noqa\nfrom dizoo.classic_control.cartpole.entry.cartpole_ppg_main import main as ppg_main\nfrom dizoo.classic_control.cartpole.entry.cartpole_ppo_main import main as ppo_main\nfrom dizoo.classic_control.cartpole.config.cartpole_r2d2_config import cartpole_r2d2_config, cartpole_r2d2_create_config # noqa\nfrom dizoo.classic_control.pendulum.config import pendulum_ddpg_config, pendulum_ddpg_create_config\nfrom dizoo.classic_control.pendulum.config import pendulum_td3_config, pendulum_td3_create_config\nfrom dizoo.classic_control.pendulum.config import pendulum_sac_config, pendulum_sac_create_config\nfrom dizoo.classic_control.pendulum.config import pendulum_d4pg_config, pendulum_d4pg_create_config\nfrom dizoo.classic_control.bitflip.config import bitflip_her_dqn_config, bitflip_her_dqn_create_config\nfrom dizoo.classic_control.bitflip.entry.bitflip_dqn_main import main as bitflip_dqn_main\nfrom dizoo.multiagent_particle.config import cooperative_navigation_qmix_config, cooperative_navigation_qmix_create_config # noqa\nfrom dizoo.multiagent_particle.config import cooperative_navigation_wqmix_config, cooperative_navigation_wqmix_create_config # noqa\nfrom dizoo.multiagent_particle.config import cooperative_navigation_vdn_config, cooperative_navigation_vdn_create_config # noqa\nfrom dizoo.multiagent_particle.config import cooperative_navigation_coma_config, cooperative_navigation_coma_create_config # noqa\nfrom dizoo.multiagent_particle.config import cooperative_navigation_collaq_config, cooperative_navigation_collaq_create_config # noqa\nfrom dizoo.multiagent_particle.config import cooperative_navigation_qtran_config, cooperative_navigation_qtran_create_config # noqa\nfrom dizoo.multiagent_particle.config import cooperative_navigation_atoc_config, cooperative_navigation_atoc_create_config # noqa\nfrom dizoo.league_demo.league_demo_ppo_config import league_demo_ppo_config\nfrom dizoo.league_demo.selfplay_demo_ppo_main import main as 
selfplay_main\nfrom dizoo.league_demo.league_demo_ppo_main import main as league_main\nfrom dizoo.classic_control.pendulum.config.pendulum_sac_data_generation_default_config import pendulum_sac_data_genearation_default_config, pendulum_sac_data_genearation_default_create_config # noqa\nfrom dizoo.classic_control.pendulum.config.pendulum_cql_config import pendulum_cql_default_config, pendulum_cql_default_create_config # noqa\nfrom dizoo.classic_control.cartpole.config.cartpole_qrdqn_generation_data_config import cartpole_qrdqn_generation_data_config, cartpole_qrdqn_generation_data_create_config # noqa\nfrom dizoo.classic_control.cartpole.config.cartpole_cql_config import cartpole_discrete_cql_config, cartpole_discrete_cql_create_config # noqa\nfrom dizoo.classic_control.pendulum.config.pendulum_td3_data_generation_config import pendulum_td3_generation_config, pendulum_td3_generation_create_config # noqa\nfrom dizoo.classic_control.pendulum.config.pendulum_td3_bc_config import pendulum_td3_bc_config, pendulum_td3_bc_create_config # noqa\nfrom dizoo.gym_hybrid.config.gym_hybrid_ddpg_config import gym_hybrid_ddpg_config, gym_hybrid_ddpg_create_config\n\n\[email protected]\[email protected]\ndef test_dqn():\n config = [deepcopy(cartpole_dqn_config), deepcopy(cartpole_dqn_create_config)]\n config[0].policy.learn.update_per_collect = 1\n try:\n serial_pipeline(config, seed=0, max_iterations=1)\n except Exception:\n assert False, \"pipeline fail\"\n finally:\n os.popen('rm -rf log ckpt*')\n\n\[email protected]\ndef test_ddpg():\n config = [deepcopy(pendulum_ddpg_config), deepcopy(pendulum_ddpg_create_config)]\n config[0].policy.learn.update_per_collect = 1\n try:\n serial_pipeline(config, seed=0, max_iterations=1)\n except Exception:\n assert False, \"pipeline fail\"\n\n\n# @pytest.mark.unittest\ndef test_hybrid_ddpg():\n config = [deepcopy(gym_hybrid_ddpg_config), deepcopy(gym_hybrid_ddpg_create_config)]\n config[0].policy.learn.update_per_collect = 1\n try:\n serial_pipeline(config, seed=0, max_iterations=1)\n except Exception:\n assert False, \"pipeline fail\"\n\n\[email protected]\ndef test_td3():\n config = [deepcopy(pendulum_td3_config), deepcopy(pendulum_td3_create_config)]\n config[0].policy.learn.update_per_collect = 1\n try:\n serial_pipeline(config, seed=0, max_iterations=1)\n except Exception:\n assert False, \"pipeline fail\"\n\n\[email protected]\ndef test_rainbow():\n config = [deepcopy(cartpole_rainbow_config), deepcopy(cartpole_rainbow_create_config)]\n config[0].policy.learn.update_per_collect = 1\n try:\n serial_pipeline(config, seed=0, max_iterations=1)\n except Exception:\n assert False, \"pipeline fail\"\n\n\[email protected]\ndef test_iqn():\n config = [deepcopy(cartpole_iqn_config), deepcopy(cartpole_iqn_create_config)]\n config[0].policy.learn.update_per_collect = 1\n try:\n serial_pipeline(config, seed=0, max_iterations=1)\n except Exception:\n assert False, \"pipeline fail\"\n\n\[email protected]\ndef test_c51():\n config = [deepcopy(cartpole_c51_config), deepcopy(cartpole_c51_create_config)]\n config[0].policy.learn.update_per_collect = 1\n try:\n serial_pipeline(config, seed=0, max_iterations=1)\n except Exception:\n assert False, \"pipeline fail\"\n\n\[email protected]\ndef test_qrdqn():\n config = [deepcopy(cartpole_qrdqn_config), deepcopy(cartpole_qrdqn_create_config)]\n config[0].policy.learn.update_per_collect = 1\n try:\n serial_pipeline(config, seed=0, max_iterations=1)\n except Exception:\n assert False, \"pipeline fail\"\n\n\[email protected]\ndef 
test_ppo():\n config = [deepcopy(cartpole_ppo_offpolicy_config), deepcopy(cartpole_ppo_offpolicy_create_config)]\n config[0].policy.learn.update_per_collect = 1\n try:\n serial_pipeline(config, seed=0, max_iterations=1)\n except Exception:\n assert False, \"pipeline fail\"\n\n\[email protected]\ndef test_ppo_nstep_return():\n config = [deepcopy(cartpole_ppo_offpolicy_config), deepcopy(cartpole_ppo_offpolicy_create_config)]\n config[0].policy.learn.update_per_collect = 1\n config[0].policy.nstep_return = True\n try:\n serial_pipeline(config, seed=0, max_iterations=1)\n except Exception:\n assert False, \"pipeline fail\"\n\n\[email protected]\ndef test_sac():\n config = [deepcopy(pendulum_sac_config), deepcopy(pendulum_sac_create_config)]\n config[0].policy.learn.update_per_collect = 1\n config[0].policy.learn.auto_alpha = False\n try:\n serial_pipeline(config, seed=0, max_iterations=1)\n except Exception:\n assert False, \"pipeline fail\"\n\n\[email protected]\ndef test_sac_auto_alpha():\n config = [deepcopy(pendulum_sac_config), deepcopy(pendulum_sac_create_config)]\n config[0].policy.learn.update_per_collect = 1\n config[0].policy.learn.auto_alpha = True\n config[0].policy.learn.log_space = False\n try:\n serial_pipeline(config, seed=0, max_iterations=1)\n except Exception:\n assert False, \"pipeline fail\"\n\n\[email protected]\ndef test_sac_log_space():\n config = [deepcopy(pendulum_sac_config), deepcopy(pendulum_sac_create_config)]\n config[0].policy.learn.update_per_collect = 1\n config[0].policy.learn.auto_alpha = True\n config[0].policy.learn.log_space = True\n try:\n serial_pipeline(config, seed=0, max_iterations=1)\n except Exception:\n assert False, \"pipeline fail\"\n\n\[email protected]\ndef test_r2d2():\n config = [deepcopy(cartpole_r2d2_config), deepcopy(cartpole_r2d2_create_config)]\n config[0].policy.learn.update_per_collect = 1\n try:\n serial_pipeline(config, seed=0, max_iterations=5)\n except Exception:\n assert False, \"pipeline fail\"\n\n\[email protected]\ndef test_impala():\n config = [deepcopy(cartpole_impala_config), deepcopy(cartpole_impala_create_config)]\n config[0].policy.learn.update_per_collect = 1\n try:\n serial_pipeline(config, seed=0, max_iterations=1)\n except Exception:\n assert False, \"pipeline fail\"\n\n\[email protected]\ndef test_her_dqn():\n bitflip_her_dqn_config.policy.cuda = False\n try:\n bitflip_dqn_main(bitflip_her_dqn_config, seed=0, max_iterations=1)\n except Exception:\n assert False, \"pipeline fail\"\n\n\[email protected]\ndef test_collaq():\n config = [deepcopy(cooperative_navigation_collaq_config), deepcopy(cooperative_navigation_collaq_create_config)]\n config[0].policy.cuda = False\n config[0].policy.learn.update_per_collect = 1\n try:\n serial_pipeline(config, seed=0, max_iterations=1)\n except Exception:\n assert False, \"pipeline fail\"\n finally:\n os.popen('rm -rf log ckpt*')\n\n\[email protected]\ndef test_coma():\n config = [deepcopy(cooperative_navigation_coma_config), deepcopy(cooperative_navigation_coma_create_config)]\n config[0].policy.cuda = False\n config[0].policy.learn.update_per_collect = 1\n try:\n serial_pipeline(config, seed=0, max_iterations=1)\n except Exception:\n assert False, \"pipeline fail\"\n finally:\n os.popen('rm -rf log ckpt*')\n\n\[email protected]\ndef test_qmix():\n config = [deepcopy(cooperative_navigation_qmix_config), deepcopy(cooperative_navigation_qmix_create_config)]\n config[0].policy.cuda = False\n config[0].policy.learn.update_per_collect = 1\n try:\n serial_pipeline(config, seed=0, 
max_iterations=1)\n except Exception:\n assert False, \"pipeline fail\"\n finally:\n os.popen('rm -rf log ckpt*')\n\n\[email protected]\ndef test_wqmix():\n config = [deepcopy(cooperative_navigation_wqmix_config), deepcopy(cooperative_navigation_wqmix_create_config)]\n config[0].policy.cuda = False\n config[0].policy.learn.update_per_collect = 1\n try:\n serial_pipeline(config, seed=0, max_iterations=1)\n except Exception:\n assert False, \"pipeline fail\"\n finally:\n os.popen('rm -rf log ckpt*')\n\n\[email protected]\ndef test_qtran():\n config = [deepcopy(cooperative_navigation_qtran_config), deepcopy(cooperative_navigation_qtran_create_config)]\n config[0].policy.cuda = False\n config[0].policy.learn.update_per_collect = 1\n try:\n serial_pipeline(config, seed=0, max_iterations=1)\n except Exception:\n assert False, \"pipeline fail\"\n finally:\n os.popen('rm -rf log ckpt*')\n\n\[email protected]\ndef test_atoc():\n config = [deepcopy(cooperative_navigation_atoc_config), deepcopy(cooperative_navigation_atoc_create_config)]\n config[0].policy.cuda = False\n try:\n serial_pipeline(config, seed=0, max_iterations=1)\n except Exception:\n assert False, \"pipeline fail\"\n finally:\n os.popen('rm -rf log ckpt*')\n\n\[email protected]\ndef test_ppg():\n cartpole_ppg_config.policy.use_cuda = False\n try:\n ppg_main(cartpole_ppg_config, seed=0, max_iterations=1)\n except Exception:\n assert False, \"pipeline fail\"\n\n\[email protected]\ndef test_sqn():\n config = [deepcopy(cartpole_sqn_config), deepcopy(cartpole_sqn_create_config)]\n config[0].policy.learn.update_per_collect = 1\n try:\n serial_pipeline(config, seed=0, max_iterations=1)\n except Exception:\n assert False, \"pipeline fail\"\n finally:\n os.popen('rm -rf log ckpt*')\n\n\[email protected]\ndef test_selfplay():\n try:\n selfplay_main(deepcopy(league_demo_ppo_config), seed=0, max_iterations=1)\n except Exception:\n assert False, \"pipeline fail\"\n\n\[email protected]\ndef test_league():\n try:\n league_main(deepcopy(league_demo_ppo_config), seed=0, max_iterations=1)\n except Exception as e:\n assert False, \"pipeline fail\"\n\n\[email protected]\ndef test_acer():\n config = [deepcopy(cartpole_acer_config), deepcopy(cartpole_acer_create_config)]\n config[0].policy.learn.update_per_collect = 1\n try:\n serial_pipeline(config, seed=0, max_iterations=1)\n except Exception:\n assert False, \"pipeline fail\"\n\n\[email protected]\ndef test_cql():\n # train expert\n config = [deepcopy(pendulum_sac_config), deepcopy(pendulum_sac_create_config)]\n config[0].policy.learn.update_per_collect = 1\n config[0].exp_name = 'sac'\n try:\n serial_pipeline(config, seed=0, max_iterations=1)\n except Exception:\n assert False, \"pipeline fail\"\n\n # collect expert data\n import torch\n config = [\n deepcopy(pendulum_sac_data_genearation_default_config),\n deepcopy(pendulum_sac_data_genearation_default_create_config)\n ]\n collect_count = 1000\n expert_data_path = config[0].policy.collect.save_path\n state_dict = torch.load('./sac/ckpt/iteration_0.pth.tar', map_location='cpu')\n try:\n collect_demo_data(\n config, seed=0, collect_count=collect_count, expert_data_path=expert_data_path, state_dict=state_dict\n )\n except Exception:\n assert False, \"pipeline fail\"\n\n # test cql\n config = [deepcopy(pendulum_cql_default_config), deepcopy(pendulum_cql_default_create_config)]\n config[0].policy.learn.train_epoch = 1\n config[0].policy.eval.evaluator.eval_freq = 1\n try:\n serial_pipeline_offline(config, seed=0)\n except Exception:\n assert False, 
\"pipeline fail\"\n\n\[email protected]\ndef test_d4pg():\n config = [deepcopy(pendulum_d4pg_config), deepcopy(pendulum_d4pg_create_config)]\n config[0].policy.learn.update_per_collect = 1\n try:\n serial_pipeline(config, seed=0, max_iterations=1)\n except Exception as e:\n assert False, \"pipeline fail\"\n print(repr(e))\n\n\[email protected]\ndef test_discrete_cql():\n # train expert\n config = [deepcopy(cartpole_qrdqn_config), deepcopy(cartpole_qrdqn_create_config)]\n config[0].policy.learn.update_per_collect = 1\n config[0].exp_name = 'cql_cartpole'\n try:\n serial_pipeline(config, seed=0, max_iterations=1)\n except Exception:\n assert False, \"pipeline fail\"\n # collect expert data\n import torch\n config = [deepcopy(cartpole_qrdqn_generation_data_config), deepcopy(cartpole_qrdqn_generation_data_create_config)]\n collect_count = 1000\n expert_data_path = config[0].policy.collect.save_path\n state_dict = torch.load('./cql_cartpole/ckpt/iteration_0.pth.tar', map_location='cpu')\n try:\n collect_demo_data(\n config, seed=0, collect_count=collect_count, expert_data_path=expert_data_path, state_dict=state_dict\n )\n except Exception as e:\n assert False, \"pipeline fail\"\n print(repr(e))\n\n # train cql\n config = [deepcopy(cartpole_discrete_cql_config), deepcopy(cartpole_discrete_cql_create_config)]\n config[0].policy.learn.train_epoch = 1\n config[0].policy.eval.evaluator.eval_freq = 1\n try:\n serial_pipeline_offline(config, seed=0)\n except Exception:\n assert False, \"pipeline fail\"\n finally:\n os.popen('rm -rf cartpole cartpole_cql')\n\n\[email protected]\ndef test_td3_bc():\n # train expert\n config = [deepcopy(pendulum_td3_config), deepcopy(pendulum_td3_create_config)]\n config[0].exp_name = 'td3'\n config[0].policy.learn.update_per_collect = 1\n try:\n serial_pipeline(config, seed=0, max_iterations=1)\n except Exception:\n assert False, \"pipeline fail\"\n\n # collect expert data\n import torch\n config = [deepcopy(pendulum_td3_generation_config), deepcopy(pendulum_td3_generation_create_config)]\n collect_count = 1000\n expert_data_path = config[0].policy.collect.save_path\n state_dict = torch.load('./td3/ckpt/iteration_0.pth.tar', map_location='cpu')\n try:\n collect_demo_data(\n config, seed=0, collect_count=collect_count, expert_data_path=expert_data_path, state_dict=state_dict\n )\n except Exception:\n assert False, \"pipeline fail\"\n\n # train td3 bc\n config = [deepcopy(pendulum_td3_bc_config), deepcopy(pendulum_td3_bc_create_config)]\n config[0].exp_name = 'td3_bc'\n config[0].policy.learn.train_epoch = 1\n config[0].policy.eval.evaluator.eval_freq = 1\n try:\n serial_pipeline_offline(config, seed=0)\n except Exception:\n assert False, \"pipeline fail\"\n finally:\n os.popen('rm -rf td3 td3_bc')\n"
] |
[
[
"torch.load"
]
] |
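Several of the offline-RL tests above (test_cql, test_discrete_cql, test_td3_bc) reload an expert checkpoint with `torch.load(..., map_location='cpu')`. A minimal self-contained sketch of that pattern; the model and file name are placeholders, not DI-engine's checkpoints:

```python
# Minimal sketch of the checkpoint reload pattern: save a state dict, then
# read it back with map_location='cpu' so a GPU-trained checkpoint also
# loads on a CPU-only test runner.
import torch
import torch.nn as nn

model = nn.Linear(4, 2)
torch.save({'model': model.state_dict()}, 'iteration_0.pth.tar')
state_dict = torch.load('iteration_0.pth.tar', map_location='cpu')
model.load_state_dict(state_dict['model'])
print(sorted(state_dict['model'].keys()))   # ['bias', 'weight']
```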
liyiliuxingyu/Data-preprocessing-goes-from-getting-started-to-real-world
|
[
"c83fc7b92b52bc106d9a803a6fec24d6470889c3"
] |
[
"preprocess/004_join/03_d/python_not_awesome.py"
] |
[
"from preprocess.load_data.data_loader import load_hotel_reserve\nimport pandas as pd\ncustomer_tb, hotel_tb, reserve_tb = load_hotel_reserve()\n\n# 本书刊登内容如下\nimport pandas.tseries.offsets as offsets\nimport operator\n\n# 为了进行日期计算,这里将数据类型由字符串转换为日期型(详见第10章)\nreserve_tb['reserve_datetime'] = \\\n pd.to_datetime(reserve_tb['reserve_datetime'], format='%Y-%m-%d %H:%M:%S')\n\n# 在不确认reserve_datetime中的日期的情况下,将customer_id相同的数据行全部连接 \nsum_table = pd.merge(\n\treserve_tb[['reserve_id', 'customer_id', 'reserve_datetime']],\n reserve_tb[['customer_id', 'reserve_datetime', 'total_price']]\n .rename(columns={'reserve_datetime': 'reserve_datetime_before'}),\n on='customer_id')\n\n# 比较checkin列的日期,仅提取连接了90天内的数据的数据行\n# 使用operator中的and_函数,设置复合条件\n# 按reserve_id计算total_price的合计值\n# (关于日期时间型,详见第10章)\nsum_table = sum_table[operator.and_(\n sum_table['reserve_datetime'] > sum_table['reserve_datetime_before'],\n sum_table['reserve_datetime'] + offsets.Day(-90) <= sum_table['reserve_datetime_before']\n)].groupby('reserve_id')['total_price'].sum().reset_index()\n\n# 设置列名\nsum_table.columns = ['reserve_id', 'total_price_sum']\n\n# 连接计算出的合计值,将合计值信息加入源表中\n# 使用fillna将不存在合计值的记录的值设置为0\npd.merge(reserve_tb, sum_table, on='reserve_id', how='left').fillna(0)\n"
] |
[
[
"pandas.merge",
"pandas.to_datetime",
"pandas.tseries.offsets.Day"
]
] |
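The snippet above joins the reservation table with itself on `customer_id`, keeps only earlier bookings within the preceding 90 days, and sums their prices per reservation. The same pattern on an invented three-row table, so the intermediate frames can be inspected:

```python
# Toy data, not the book's hotel dataset: self-join, 90-day window filter,
# then per-reservation aggregation.
import pandas as pd
import pandas.tseries.offsets as offsets

reserve_tb = pd.DataFrame({
    'reserve_id': [1, 2, 3],
    'customer_id': ['c1', 'c1', 'c1'],
    'reserve_datetime': pd.to_datetime(['2024-01-01', '2024-03-01', '2024-08-01']),
    'total_price': [100, 200, 300],
})

pairs = pd.merge(
    reserve_tb[['reserve_id', 'customer_id', 'reserve_datetime']],
    reserve_tb[['customer_id', 'reserve_datetime', 'total_price']]
        .rename(columns={'reserve_datetime': 'reserve_datetime_before'}),
    on='customer_id')

within_90_days = (
    (pairs['reserve_datetime'] > pairs['reserve_datetime_before']) &
    (pairs['reserve_datetime'] + offsets.Day(-90) <= pairs['reserve_datetime_before']))
sum_table = pairs[within_90_days].groupby('reserve_id')['total_price'].sum().reset_index()
sum_table.columns = ['reserve_id', 'total_price_sum']
print(pd.merge(reserve_tb, sum_table, on='reserve_id', how='left').fillna(0))
```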
WanyuGroup/CVPR2022-OrphicX
|
[
"98d8d8259439c45661573e575cf956331df16abc"
] |
[
"orphicx_graph.py"
] |
[
"\"\"\" explainer_main.py\n\n Main user interface for the explainer module.\n\"\"\"\nimport argparse\nimport os\nfrom networkx.algorithms.components.connected import connected_components\n\nimport sklearn.metrics as metrics\nfrom functools import partial\nfrom tensorboardX import SummaryWriter\n\nimport sys\nimport time\nimport math\nimport pickle\nimport shutil\nimport torch\nimport random\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\nimport networkx as nx\nimport torch.nn.functional as F\nimport causaleffect\nfrom torch import nn, optim\nfrom gae.model import VGAE3MLP\nfrom gae.optimizer import loss_function as gae_loss\n\nimport sys\ndir_path = os.path.dirname(os.path.realpath(__file__))\nsys.path.append(os.path.join(dir_path, 'gnnexp'))\n\nimport models\nimport utils.io_utils as io_utils\nimport utils.parser_utils as parser_utils\nfrom explainer import explain\n\n\ndecimal_round = lambda x: round(x, 5)\ncolor_map = ['gray', 'blue', 'purple', 'red', 'brown', 'green', 'orange', 'olive']\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--dataset', type=str, default='Mutagenicity', help='Name of dataset.')\nparser.add_argument('--output', type=str, default=None, help='output path.')\nparser.add_argument('--lr', type=float, default=0.01, help='Initial learning rate.')\nparser.add_argument('-e', '--epoch', type=int, default=300, help='Number of training epochs.')\nparser.add_argument('-b', '--batch_size', type=int, default=128, help='Number of samples in a minibatch.')\nparser.add_argument('--seed', type=int, default=42, help='Number of training epochs.')\nparser.add_argument('--max_grad_norm', type=float, default=1, help='max_grad_norm.')\nparser.add_argument('--dropout', type=float, default=0., help='Dropout rate (1 - keep probability).')\nparser.add_argument('--encoder_hidden1', type=int, default=32, help='Number of units in hidden layer 1.')\nparser.add_argument('--encoder_hidden2', type=int, default=16, help='Number of units in hidden layer 2.')\nparser.add_argument('--encoder_output', type=int, default=16, help='Dim of output of VGAE encoder.')\nparser.add_argument('--decoder_hidden1', type=int, default=16, help='Number of units in decoder hidden layer 1.')\nparser.add_argument('--decoder_hidden2', type=int, default=16, help='Number of units in decoder hidden layer 2.')\nparser.add_argument('--K', type=int, default=8, help='Number of casual factors.')\nparser.add_argument('--coef_lambda', type=float, default=0.01, help='Coefficient of gae loss.')\nparser.add_argument('--coef_kl', type=float, default=0.01, help='Coefficient of gae loss.')\nparser.add_argument('--coef_causal', type=float, default=1.0, help='Coefficient of causal loss.')\nparser.add_argument('--coef_size', type=float, default=0.0, help='Coefficient of size loss.')\nparser.add_argument('--NX', type=int, default=1, help='Number of monte-carlo samples per causal factor.')\nparser.add_argument('--NA', type=int, default=1, help='Number of monte-carlo samples per causal factor.')\nparser.add_argument('--Nalpha', type=int, default=25, help='Number of monte-carlo samples per causal factor.')\nparser.add_argument('--Nbeta', type=int, default=100, help='Number of monte-carlo samples per noncausal factor.')\nparser.add_argument('--node_perm', action=\"store_true\", help='Use node permutation as data augmentation for causal training.')\nparser.add_argument('--load_ckpt', default=None, help='Load parameters from checkpoint.')\nparser.add_argument('--gpu', 
action='store_true')\nparser.add_argument('--resume', action='store_true')\nparser.add_argument('--retrain', action='store_true')\nparser.add_argument('--patient', type=int, default=100, help='Patient for early stopping.')\nparser.add_argument('--plot_info_flow', action='store_true')\n\nargs = parser.parse_args()\n\nif args.gpu and torch.cuda.is_available():\n print(\"Use cuda\")\n device = torch.device(\"cuda\")\nelse:\n device = torch.device(\"cpu\")\ntorch.manual_seed(args.seed)\nif torch.cuda.is_available():\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\nnp.random.seed(args.seed)\nrandom.seed(args.seed)\n\ndef graph_labeling(G):\n for node in G:\n G.nodes[node]['string'] = 1\n old_strings = tuple([G.nodes[node]['string'] for node in G])\n for iter_num in range(100):\n for node in G:\n string = sorted([G.nodes[neigh]['string'] for neigh in G.neighbors(node)])\n G.nodes[node]['concat_string'] = tuple([G.nodes[node]['string']] + string)\n d = nx.get_node_attributes(G,'concat_string')\n nodes,strings = zip(*{k: d[k] for k in sorted(d, key=d.get)}.items())\n map_string = dict([[string, i+1] for i, string in enumerate(sorted(set(strings)))])\n for node in nodes:\n G.nodes[node]['string'] = map_string[G.nodes[node]['concat_string']]\n new_strings = tuple([G.nodes[node]['string'] for node in G])\n if old_strings == new_strings:\n break\n else:\n old_strings = new_strings\n return G\n\ndef preprocess_graph(adj):\n adj_ = adj + np.eye(adj.shape[0])\n rowsum = np.array(adj_.sum(1))\n degree_mat_inv_sqrt = np.diag(np.power(rowsum, -0.5).flatten())\n adj_normalized = adj_.dot(degree_mat_inv_sqrt).transpose().dot(degree_mat_inv_sqrt)\n return torch.from_numpy(adj_normalized).float()\n\ndef sparse_mx_to_torch_sparse_tensor(sparse_mx):\n \"\"\"Convert a scipy sparse matrix to a torch sparse tensor.\"\"\"\n sparse_mx = sparse_mx.tocoo().astype(np.float32)\n indices = torch.from_numpy(\n np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))\n values = torch.from_numpy(sparse_mx.data)\n shape = torch.Size(sparse_mx.shape)\n return torch.sparse.FloatTensor(indices, values, shape)\n\ndef gaeloss(x,mu,logvar,data):\n return gae_loss(preds=x, labels=data['adj_label'],\n mu=mu, logvar=logvar, n_nodes=data['n_nodes'],\n norm=data['norm'], pos_weight=data['pos_weight'])\n\nsoftmax = torch.nn.Softmax(dim=1)\nce = torch.nn.CrossEntropyLoss(reduction='mean')\n\ndef main():\n # Load a model checkpoint\n ckpt = torch.load('ckpt/%s_base_h20_o20.pth.tar'%(args.dataset))\n cg_dict = ckpt[\"cg\"] # get computation graph\n input_dim = cg_dict[\"feat\"].shape[2] \n num_classes = cg_dict[\"pred\"].shape[2]\n print(\"input dim: \", input_dim, \"; num classes: \", num_classes)\n\n # Explain Graph prediction\n classifier = models.GcnEncoderGraph(\n input_dim=input_dim,\n hidden_dim=20,\n embedding_dim=20,\n label_dim=num_classes,\n num_layers=3,\n bn=False,\n args=argparse.Namespace(gpu=args.gpu,bias=True,method=None),\n ).to(device)\n\n # load state_dict (obtained by model.state_dict() when saving checkpoint)\n classifier.load_state_dict(ckpt[\"model_state\"])\n classifier.eval()\n print(\"Number of graphs:\", cg_dict[\"adj\"].shape[0])\n if args.output is None:\n args.output = args.dataset\n\n K = args.K\n L = args.encoder_output - K\n ceparams = {\n 'Nalpha': args.Nalpha,\n 'Nbeta' : args.Nbeta,\n 'K' : K,\n 'L' : L,\n 'z_dim' : args.encoder_output,\n 'M' : num_classes}\n\n model = VGAE3MLP(\n input_dim + 100, args.encoder_hidden1, args.encoder_hidden1,\n args.encoder_output, 
args.decoder_hidden1, args.decoder_hidden2,\n args.K, args.dropout\n ).to(device)\n optimizer = optim.Adam(model.parameters(), lr=args.lr)\n criterion = gaeloss\n label_onehot = torch.eye(100, dtype=torch.float)\n class GraphSampler(torch.utils.data.Dataset):\n \"\"\" Sample graphs and nodes in graph\n \"\"\"\n def __init__(\n self,\n graph_idxs\n ):\n self.graph_idxs = graph_idxs\n self.graph_data = []\n for graph_idx in graph_idxs:\n adj = cg_dict[\"adj\"][graph_idx].float()\n label = cg_dict[\"label\"][graph_idx].long()\n feat = cg_dict[\"feat\"][graph_idx, :].float()\n G = graph_labeling(nx.from_numpy_array(cg_dict[\"adj\"][graph_idx].numpy()))\n graph_label = np.array([G.nodes[node]['string'] for node in G])\n graph_label_onehot = label_onehot[graph_label]\n sub_feat = torch.cat((feat, graph_label_onehot), dim=1)\n adj_label = adj + np.eye(adj.shape[0])\n n_nodes = adj.shape[0]\n graph_size = torch.count_nonzero(adj.sum(-1))\n pos_weight = float(adj.shape[0] * adj.shape[0] - adj.sum()) / adj.sum()\n pos_weight = torch.from_numpy(np.array(pos_weight))\n norm = torch.tensor(adj.shape[0] * adj.shape[0] / float((adj.shape[0] * adj.shape[0] - adj.sum()) * 2))\n self.graph_data += [{\n \"graph_idx\": graph_idx,\n \"graph_size\": graph_size, \n \"sub_adj\": adj.to(device), \n \"feat\": feat.to(device).float(), \n \"sub_feat\": sub_feat.to(device).float(), \n \"sub_label\": label.to(device).float(), \n \"adj_label\": adj_label.to(device).float(),\n \"n_nodes\": torch.Tensor([n_nodes])[0].to(device),\n \"pos_weight\": pos_weight.to(device),\n \"norm\": norm.to(device)\n }]\n\n def __len__(self):\n return len(self.graph_idxs)\n\n def __getitem__(self, idx):\n return self.graph_data[idx]\n\n train_idxs = np.array(cg_dict['train_idx'])\n val_idxs = np.array(cg_dict['val_idx'])\n test_idxs = np.array(cg_dict['test_idx'])\n train_graphs = GraphSampler(train_idxs)\n train_dataset = torch.utils.data.DataLoader(\n train_graphs,\n batch_size=args.batch_size,\n shuffle=True,\n num_workers=0,\n )\n val_graphs = GraphSampler(val_idxs)\n val_dataset = torch.utils.data.DataLoader(\n val_graphs,\n batch_size=1000,\n shuffle=False,\n num_workers=0,\n )\n test_graphs = GraphSampler(test_idxs)\n test_dataset = torch.utils.data.DataLoader(\n test_graphs,\n batch_size=1000,\n shuffle=False,\n num_workers=0,\n )\n\n def eval_model(dataset, prefix=''):\n model.eval()\n with torch.no_grad():\n for data in dataset:\n labels = cg_dict['label'][data['graph_idx'].long()].long().to(device)\n recovered, mu, logvar = model(data['sub_feat'], data['sub_adj'])\n recovered_adj = torch.sigmoid(recovered)\n nll_loss = criterion(recovered, mu, logvar, data).mean()\n org_adjs = data['sub_adj']\n org_logits = classifier(data['feat'], data['sub_adj'])[0]\n org_probs = F.softmax(org_logits, dim=1)\n org_log_probs = F.log_softmax(org_logits, dim=1)\n masked_recovered_adj = recovered_adj * data['sub_adj']\n recovered_logits = classifier(data['feat'], masked_recovered_adj)[0]\n recovered_probs = F.softmax(recovered_logits, dim=1)\n recovered_log_probs = F.log_softmax(recovered_logits, dim=1)\n alpha_mu = torch.zeros_like(mu)\n alpha_mu[:,:,:args.K] = mu[:,:,:args.K]\n alpha_adj = torch.sigmoid(model.dc(alpha_mu))\n masked_alpha_adj = alpha_adj * data['sub_adj']\n alpha_logits = classifier(data['feat'], masked_alpha_adj)[0]\n beta_mu = torch.zeros_like(mu)\n beta_mu[:,:,args.K:] = mu[:,:,args.K:]\n beta_adj = torch.sigmoid(model.dc(beta_mu))\n masked_beta_adj = beta_adj * data['sub_adj']\n beta_logits = classifier(data['feat'], 
masked_beta_adj)[0]\n causal_loss = []\n beta_info = []\n \n for idx in random.sample(range(0, data['feat'].shape[0]), args.NX): \n _causal_loss, _ = causaleffect.joint_uncond(ceparams, model.dc, classifier, data['sub_adj'][idx], data['feat'][idx], act=torch.sigmoid, device=device)\n _beta_info, _ = causaleffect.beta_info_flow(ceparams, model.dc, classifier, data['sub_adj'][idx], data['feat'][idx], act=torch.sigmoid, device=device)\n causal_loss += [_causal_loss]\n beta_info += [_beta_info]\n for A_idx in random.sample(range(0, data['feat'].shape[0]), args.NA-1):\n if args.node_perm:\n perm = torch.randperm(data['graph_size'][idx])\n perm_adj = data['sub_adj'][idx].clone().detach()\n perm_adj[:data['graph_size'][idx]] = perm_adj[perm]\n else:\n perm_adj = data['sub_adj'][A_idx]\n _causal_loss, _ = causaleffect.joint_uncond(ceparams, model.dc, classifier, perm_adj, data['feat'][idx], act=torch.sigmoid, device=device)\n _beta_info, _ = causaleffect.beta_info_flow(ceparams, model.dc, classifier, perm_adj, data['feat'][idx], act=torch.sigmoid, device=device)\n causal_loss += [_causal_loss]\n beta_info += [_beta_info]\n causal_loss = torch.stack(causal_loss).mean()\n alpha_info = causal_loss\n beta_info = torch.stack(beta_info).mean()\n klloss = F.kl_div(F.log_softmax(alpha_logits, dim=1), org_probs, reduction='mean')\n pred_labels = torch.argmax(org_probs,axis=1)\n org_acc = (torch.argmax(org_probs,axis=1) == torch.argmax(recovered_probs,axis=1)).float().mean()\n pred_acc = (torch.argmax(recovered_probs,axis=1) == labels).float().mean()\n kl_pred_org = F.kl_div(recovered_log_probs, org_probs, reduction='mean')\n alpha_probs = F.softmax(alpha_logits, dim=1)\n alpha_log_probs = F.log_softmax(alpha_logits, dim=1)\n beta_probs = F.softmax(beta_logits, dim=1)\n beta_log_probs = F.log_softmax(beta_logits, dim=1)\n alpha_gt_acc = (torch.argmax(alpha_probs,axis=1) == labels).float().mean()\n alpha_pred_acc = (torch.argmax(alpha_probs,axis=1) == pred_labels).float().mean()\n alpha_kld = F.kl_div(alpha_log_probs, org_probs, reduction='mean')\n beta_gt_acc = (torch.argmax(beta_probs,axis=1) == labels).float().mean()\n beta_pred_acc = (torch.argmax(beta_probs,axis=1) == pred_labels).float().mean()\n beta_kld = F.kl_div(beta_log_probs, org_probs, reduction='mean')\n alpha_sparsity = masked_alpha_adj.mean((1,2))/org_adjs.mean((1,2))\n loss = args.coef_lambda * nll_loss + \\\n args.coef_causal * causal_loss + \\\n args.coef_kl * klloss + \\\n args.coef_size * alpha_sparsity.mean()\n writer.add_scalar(\"%s/total_loss\"%prefix, loss, epoch)\n writer.add_scalar(\"%s/nll\"%prefix, nll_loss, epoch)\n writer.add_scalar(\"%s/causal\"%prefix, causal_loss, epoch)\n writer.add_scalar(\"%s/alpha_info_flow\"%prefix, alpha_info/(alpha_info+beta_info), epoch)\n writer.add_scalar(\"%s/beta_info_flow\"%prefix, beta_info/(alpha_info+beta_info), epoch)\n writer.add_scalar(\"%s/acc(Y_rec, Y_org)\"%prefix, org_acc, epoch)\n writer.add_scalar(\"%s/acc(Y_rec, labels)\"%prefix, pred_acc, epoch)\n writer.add_scalar(\"%s/kld(Y_rec, Y_org)\"%prefix, kl_pred_org, epoch)\n writer.add_scalar(\"%s/kld(Y_alpha, Y_org)\"%prefix, alpha_kld, epoch)\n writer.add_scalar(\"%s/kld(Y_beta, Y_org)\"%prefix, beta_kld, epoch)\n writer.add_scalar(\"%s/alpha_sparsity\"%prefix, alpha_sparsity.mean(), epoch)\n writer.add_scalar(\"%s/acc(Y_alpha, labels)\"%prefix, alpha_gt_acc, epoch)\n writer.add_scalar(\"%s/acc(Y_beta, labels)\"%prefix, beta_gt_acc, epoch)\n writer.add_scalar(\"%s/acc(Y_alpha, Y_org)\"%prefix, alpha_pred_acc, epoch)\n 
writer.add_scalar(\"%s/acc(Y_beta, Y_org)\"%prefix, beta_pred_acc, epoch)\n return loss.item()\n\n def save_checkpoint(filename):\n torch.save({\n 'model': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'best_loss': best_loss,\n 'epoch': epoch\n }, filename)\n\n if args.load_ckpt:\n ckpt_path = args.load_ckpt\n else:\n ckpt_path = os.path.join('explanation', args.output, 'model.ckpt')\n if os.path.exists(ckpt_path) and not args.retrain:\n print(\"Load checkpoint from {}\".format(ckpt_path))\n checkpoint = torch.load(ckpt_path)\n model.load_state_dict(checkpoint['model'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n start_epoch = checkpoint['epoch'] + 1\n best_loss = checkpoint['best_loss']\n else:\n args.retrain = True\n start_epoch = 1\n best_loss = 100\n if args.resume or args.retrain:\n patient = args.patient\n model.train()\n start_time = time.time()\n writer = SummaryWriter(comment=args.output)\n os.makedirs('explanation/%s' % args.output, exist_ok=True)\n for epoch in tqdm(range(start_epoch, args.epoch+1)):\n # print(\"------- Epoch %2d ------\" % epoch)\n model.train()\n train_losses = []\n for batch_idx, data in enumerate(train_dataset):\n optimizer.zero_grad()\n mu, logvar = model.encode(data['sub_feat'], data['sub_adj'])\n sample_mu = model.reparameterize(mu, logvar)\n recovered = model.dc(sample_mu)\n org_logit = classifier(data['feat'], data['sub_adj'])[0]\n org_probs = F.softmax(org_logit, dim=1)\n if args.coef_lambda:\n nll_loss = args.coef_lambda * criterion(recovered, mu, logvar, data).mean()\n else:\n nll_loss = 0\n alpha_mu = torch.zeros_like(sample_mu)\n alpha_mu[:,:,:args.K] = sample_mu[:,:,:args.K]\n alpha_adj = torch.sigmoid(model.dc(alpha_mu))\n masked_alpha_adj = alpha_adj * data['sub_adj']\n alpha_logit = classifier(data['feat'], masked_alpha_adj)[0]\n alpha_sparsity = masked_alpha_adj.mean((1,2))/data['sub_adj'].mean((1,2))\n if args.coef_causal:\n causal_loss = []\n NX = min(data['feat'].shape[0], args.NX)\n NA = min(data['feat'].shape[0], args.NA)\n for idx in random.sample(range(0, data['feat'].shape[0]), NX):\n _causal_loss, _ = causaleffect.joint_uncond(ceparams, model.dc, classifier, data['sub_adj'][idx], data['feat'][idx], act=torch.sigmoid, device=device)\n causal_loss += [_causal_loss]\n for A_idx in random.sample(range(0, data['feat'].shape[0]), NA-1):\n if args.node_perm:\n perm = torch.randperm(data['graph_size'][idx])\n perm_adj = data['sub_adj'][idx].clone().detach()\n perm_adj[:data['graph_size'][idx]] = perm_adj[perm]\n else:\n perm_adj = data['sub_adj'][A_idx]\n _causal_loss, _ = causaleffect.joint_uncond(ceparams, model.dc, classifier, perm_adj, data['feat'][idx], act=torch.sigmoid, device=device)\n causal_loss += [_causal_loss]\n causal_loss = args.coef_causal * torch.stack(causal_loss).mean()\n else:\n causal_loss = 0\n if args.coef_kl:\n klloss = args.coef_kl * F.kl_div(F.log_softmax(alpha_logit,dim=1), org_probs, reduction='mean')\n else:\n klloss = 0\n if args.coef_size:\n size_loss = args.coef_size * alpha_sparsity.mean()\n else:\n size_loss = 0\n\n loss = nll_loss + causal_loss + klloss + size_loss\n loss.backward()\n nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n optimizer.step()\n train_losses += [[nll_loss, causal_loss, klloss, size_loss]]\n sys.stdout.flush()\n \n # train_loss = (torch.cat(train_losses)).mean().item()\n nll_loss, causal_loss, klloss, size_loss = torch.tensor(train_losses).mean(0)\n writer.add_scalar(\"train/nll\", nll_loss, epoch)\n writer.add_scalar(\"train/causal\", 
causal_loss, epoch)\n writer.add_scalar(\"train/kld(Y_alpha,Y_org)\", klloss, epoch)\n writer.add_scalar(\"train/alpha_sparsity\", size_loss, epoch)\n writer.add_scalar(\"train/total_loss\", nll_loss + causal_loss + klloss + size_loss, epoch)\n\n val_loss = eval_model(val_dataset, 'val')\n patient -= 1\n if val_loss < best_loss:\n best_loss = val_loss\n save_checkpoint('explanation/%s/model.ckpt' % args.output)\n test_loss = eval_model(test_dataset, 'test')\n patient = 100\n elif patient <= 0:\n print(\"Early stopping!\")\n break\n if epoch % 100 == 0:\n save_checkpoint('explanation/%s/model-%depoch.ckpt' % (args.output,epoch))\n print(\"Train time:\", time.time() - start_time)\n writer.close()\n checkpoint = torch.load('explanation/%s/model.ckpt' % args.output)\n model.load_state_dict(checkpoint['model'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n\n print(\"Start evaluation.\")\n\n model.eval()\n results = []\n with torch.no_grad():\n for data in test_dataset:\n labels = cg_dict['label'][data['graph_idx'].long()].long().to(device)\n mu, logvar = model.encode(data['sub_feat'], data['sub_adj'])\n org_logits = classifier(data['feat'], data['sub_adj'])[0]\n org_probs = F.softmax(org_logits, dim=1)\n pred_labels = torch.argmax(org_probs,axis=1)\n alpha_mu = torch.zeros_like(mu)\n std = torch.exp(logvar)\n eps = torch.randn_like(std)\n alpha_mu[:,:,:args.K] = eps.mul(std).add_(mu)[:,:,:args.K]\n alpha_adj = torch.sigmoid(model.dc(alpha_mu))\n masked_alpha_adj = alpha_adj * data['sub_adj']\n flatten_alpha_adj = masked_alpha_adj.flatten(1)\n for sparsity in np.arange(0, 1, 0.05):\n topk = torch.round(data['sub_adj'].sum((1,2)) * sparsity).long().unsqueeze(-1)\n threshold = torch.gather(flatten_alpha_adj.sort(1,descending=True).values, 1, topk)\n threshold = torch.maximum(threshold, torch.ones_like(threshold)*1E-6)\n topk_alpha_adj = (flatten_alpha_adj > threshold).float().view(data['sub_adj'].shape)\n alpha_logits = classifier(data['feat'], topk_alpha_adj)[0]\n alpha_log_probs = F.log_softmax(alpha_logits, dim=1)\n results += [{\n \"sparsity\": sparsity,\n \"alpha_topk\": topk_alpha_adj.sum((1,2)).mean().item()/2,\n \"alpha_sparsity\": (topk_alpha_adj.sum((1,2))/data['sub_adj'].sum((1,2))).mean().item(),\n \"alpha_gt_acc\": (torch.argmax(alpha_logits,axis=1) == labels).float().mean().item(),\n \"alpha_pred_acc\": (torch.argmax(alpha_logits,axis=1) == pred_labels).float().mean().item(),\n \"alpha_kld\": F.kl_div(alpha_log_probs, org_probs, reduction='batchmean').item()\n }]\n columns = results[0].keys()\n df = pd.DataFrame(results, columns = columns)\n df.to_csv(os.path.join('explanation', args.output, 'results.csv'))\n print(df)\n \n if args.plot_info_flow:\n print(\"Calculating information flow...\")\n with torch.no_grad():\n infos = [\n [\n - causaleffect.joint_uncond_singledim(\n ceparams, model.dc, classifier, \n data['sub_adj'][idx], data['feat'][idx], \n dim, act=torch.sigmoid, device=device\n )[0] for dim in range(ceparams['z_dim'])\n ] for idx in tqdm(range(data['feat'].shape[0]))\n ]\n infos = torch.tensor(infos, device=device)\n infos = F.normalize(infos, p=1, dim=1)\n print(infos.mean(0))\n results = []\n for info in infos:\n for dim in range(ceparams['z_dim']):\n results += [{'dim': dim+1, 'info': info[dim].item()}]\n df = pd.DataFrame(results, columns = results[0].keys())\n df.to_csv(os.path.join('explanation', args.output, 'info_flow.csv'))\n import matplotlib\n import matplotlib.pyplot as plt\n import seaborn as sns\n colors = [\"red\", \"blue\", \"orange\", 
\"green\"]\n customPalette = sns.set_palette(sns.color_palette(colors))\n matplotlib.rcParams.update({'font.size': 16})\n plt.rcParams[\"font.family\"] = \"Times New Roman\"\n f = plt.figure(figsize=(7,5))\n ax = sns.barplot(data=df, x='dim', y='info', palette=customPalette)\n plt.xlabel('Z [i]')\n plt.ylabel('Information Measurements')\n f.savefig(os.path.join('explanation', args.output, 'info_flow.pdf'))\n plt.show()\n\nif __name__ == \"__main__\":\n main()\n\n"
] |
[
[
"torch.nn.functional.kl_div",
"torch.nn.Softmax",
"torch.nn.functional.softmax",
"torch.randn_like",
"torch.load",
"torch.cat",
"torch.randperm",
"torch.utils.data.DataLoader",
"pandas.DataFrame",
"torch.no_grad",
"torch.cuda.is_available",
"torch.device",
"torch.nn.CrossEntropyLoss",
"torch.Size",
"numpy.arange",
"numpy.eye",
"torch.eye",
"torch.from_numpy",
"torch.tensor",
"torch.ones_like",
"matplotlib.pyplot.figure",
"torch.sigmoid",
"numpy.power",
"torch.zeros_like",
"torch.exp",
"matplotlib.rcParams.update",
"torch.stack",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"torch.nn.functional.normalize",
"numpy.random.seed",
"torch.nn.functional.log_softmax",
"torch.Tensor",
"torch.manual_seed",
"matplotlib.pyplot.xlabel",
"torch.sparse.FloatTensor",
"numpy.vstack",
"torch.argmax"
]
] |
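In the evaluation loop above, the explainer keeps only the top-k entries of the masked edge-probability matrix for a given sparsity budget. A minimal standalone sketch of that thresholding step on random stand-in tensors (the shapes and seed are arbitrary):

```python
# Standalone sketch of the sparsity thresholding used during evaluation.
import torch

torch.manual_seed(0)
sub_adj = (torch.rand(1, 6, 6) > 0.5).float()        # stand-in adjacency
alpha_adj = torch.sigmoid(torch.randn(1, 6, 6))      # stand-in edge probabilities
flat = (alpha_adj * sub_adj).flatten(1)

sparsity = 0.5
topk = torch.round(sub_adj.sum((1, 2)) * sparsity).long().unsqueeze(-1)
threshold = torch.gather(flat.sort(1, descending=True).values, 1, topk)
threshold = torch.maximum(threshold, torch.ones_like(threshold) * 1e-6)
topk_alpha_adj = (flat > threshold).float().view(sub_adj.shape)
print(topk_alpha_adj.sum().item(), "edges kept out of", sub_adj.sum().item())
```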
bbjy/DAN
|
[
"02e04e0aea2aa65373e7d9b4827ce76b80223149"
] |
[
"encoder.py"
] |
[
"import os\r\nimport numpy as np\r\nfrom scipy import sparse\r\nfrom scipy.sparse import coo_matrix\r\nfrom scipy.sparse import vstack\r\nfrom scipy.spatial.distance import pdist\r\nfrom copy import deepcopy\r\nfrom collections import OrderedDict\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nfrom torch.nn import Module\r\nfrom torch.nn import functional as F\r\nfrom torch.autograd import Variable\r\nimport dgl\r\nfrom dgl.nn.pytorch import GraphConv\r\nfrom tool import bpr_loss\r\n\r\nclass Encoder(nn.Module):\r\n\tdef __init__(self,\r\n\t\t\t\tg,\r\n\t\t\t\tfeatures,\r\n\t\t\t\t# userNum,\r\n\t\t\t\t# itemNum,\r\n\t\t\t\tin_dim,\r\n\t\t\t\tn_hiddens, # a list\r\n\t\t\t\tout_dim,\r\n\t\t\t\tn_layers,\r\n\t\t\t\tactivation,\r\n\t\t\t\tdropout):\r\n\t\tsuper(Encoder,self).__init__()\r\n\t\tself.g = g\r\n\t\tself.features = features\t\r\n\t\tself.layers = nn.ModuleList()\r\n\t\tself.layers.append(GraphConv(in_dim, n_hiddens[0], activation=activation))\r\n\t\tfor i in range(n_layers - 1):\r\n\t\t\tself.layers.append(GraphConv(n_hiddens[i], n_hiddens[i+1], activation=activation))\r\n\t\tself.dropout = nn.Dropout(p=dropout)\r\n\t\tself.SR_layer = nn.Sequential(\r\n\t\t\t\t\t\tnn.Linear(n_hiddens[-1], 64),\r\n\t\t\t\t\t\tnn.Tanh(),\r\n\t\t\t\t\t\tnn.Linear(64, out_dim)\r\n\t\t\t\t\t\t)\t\t\r\n\r\n\tdef forward(self, users=None, mapped_feature=None, isreplace=False, only_common=False):\r\n\t\t# 这里的userIdx和pos_itemIdx(or neg_itemIdx)是成对儿的,\r\n\t\t\r\n\t\tif isreplace:\r\n\t\t\th = self.features.clone()\r\n\t\t\th[users] = mapped_feature\r\n\t\telse:\r\n\t\t\th = self.features.clone()\r\n\t\tfor i, layer in enumerate(self.layers):\r\n\t\t\tif i != 0:\r\n\t\t\t\th = self.dropout(h)\r\n\t\t\th = layer(self.g, h)\r\n\t\tuser_h = h[users]\r\n\t\tR = self.SR_layer(user_h)\r\n\t\treturn R, h\r\n"
] |
[
[
"torch.nn.Dropout",
"torch.nn.Linear",
"torch.nn.ModuleList",
"torch.nn.Tanh"
]
] |
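The Encoder above stacks DGL GraphConv layers inside an `nn.ModuleList`, applies dropout between layers, and projects with a small Tanh head. A self-contained sketch of the same wiring with GraphConv swapped for `nn.Linear` so it runs without DGL or a graph; that substitution is purely for illustration:

```python
# Self-contained sketch: ModuleList stacking, inter-layer dropout and a
# Tanh projection head, with nn.Linear standing in for dgl GraphConv.
import torch
import torch.nn as nn

class TinyEncoder(nn.Module):
    def __init__(self, in_dim, n_hiddens, out_dim, dropout=0.5):
        super().__init__()
        dims = [in_dim] + list(n_hiddens)
        self.layers = nn.ModuleList(nn.Linear(dims[i], dims[i + 1]) for i in range(len(n_hiddens)))
        self.dropout = nn.Dropout(p=dropout)
        self.SR_layer = nn.Sequential(nn.Linear(n_hiddens[-1], 64), nn.Tanh(), nn.Linear(64, out_dim))

    def forward(self, x):
        for i, layer in enumerate(self.layers):
            if i != 0:
                x = self.dropout(x)
            x = torch.relu(layer(x))
        return self.SR_layer(x)

print(TinyEncoder(16, [32, 32], 8)(torch.randn(4, 16)).shape)   # torch.Size([4, 8])
```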
xuyu92327/waveform-analysis
|
[
"8216cc8d7a75fc38d3fbc236d8b6b6cba963f78c"
] |
[
"source/wxh/remove_noise.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport h5py\nimport sys\nimport pandas as pd\nimport numpy as np\nfrom tqdm import tqdm\nimport multiprocessing as mp\n\n# define function to devide waveform list into groups\n\n\ndef divide(ls, unit_len):\n DividedLs = []\n UnitLen = int(unit_len)\n GroupCount = int(len(ls) / UnitLen)\n GroupCountExact = len(ls) / UnitLen\n Start = 0\n for i in range(GroupCount):\n DividedLs.append(ls[Start: (Start + UnitLen)])\n Start = Start + UnitLen\n if GroupCount < GroupCountExact: # put all remainders into the last group\n DividedLs.append(ls[GroupCount * UnitLen:])\n return DividedLs\n\n# define function to get width of division\n\n\ndef width(lst):\n i = 0\n for j in lst[0]:\n i += 1\n return i\n\n# define function to get average\n\n\ndef GetAverage(mat):\n average = []\n for group in mat:\n average.append(np.mean(group))\n return average\n\n\n# define function to get standard deviation\ndef GetStd(mat):\n std = []\n for group in mat:\n std.append(np.std(group, ddof=1))\n return std\n\n# define function to remove noise in waveform\n\n\ndef DenoisMat(mat, StdJudge):\n average = GetAverage(mat)\n std = GetStd(mat)\n n = len(mat)\n m = width(mat)\n # replace small fluctuations with average\n for i in range(n-1):\n if std[i] < StdJudge:\n mat[i] = [average[i]] * m\n if std[-1] < StdJudge:\n mat[-1] = [average[-1]] * len(mat[-1])\n m = width(mat)\n num = [0] * m\n # merge into one list\n DenoisMat = []\n for i in range(n):\n for j in mat[i]:\n DenoisMat.append(j)\n return DenoisMat\n\n\ndef dwave(i):\n wave = i['Waveform']\n std = np.std(wave, ddof=1)\n StdJudge = std / 2\n unit_len = 3\n mat = divide(wave, unit_len)\n wave = DenoisMat(mat, StdJudge)\n return wave\n\n\nif __name__ == '__main__':\n # runtime\n pool = mp.Pool(32)\n \n # input file\n with h5py.File(sys.argv[1], 'r') as ipt:\n wf = ipt['Waveform']\n # replace small unit fluctuations with average\n DenoisWave = pool.map(dwave, tqdm(wf))\n # output data\n denois = np.zeros(len(wf), dtype=wf.dtype)\n denois['EventID'] = wf['EventID']\n denois['ChannelID'] = wf['ChannelID']\n denois['Waveform'] = DenoisWave\n \n # output file\n with h5py.File(sys.argv[2], 'w') as opt:\n opt.create_dataset('Waveform', data=denois)\n"
] |
[
[
"numpy.std",
"numpy.mean"
]
] |
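`DenoisMat` above splits a waveform into fixed-size groups and flattens low-variance groups to their mean. A vectorised numpy sketch of the same idea; the helper name and the 0.5 threshold factor are assumptions for illustration, not the repository's API:

```python
# Vectorised sketch of the "flatten quiet segments to their mean" idea.
import numpy as np

def denoise(wave, unit_len=3, std_factor=0.5):
    wave = np.asarray(wave, dtype=float).copy()
    judge = np.std(wave, ddof=1) * std_factor
    n_full = (len(wave) // unit_len) * unit_len
    groups = wave[:n_full].reshape(-1, unit_len)            # view into wave
    quiet = np.std(groups, axis=1, ddof=1) < judge          # low-variance groups
    groups[quiet] = np.mean(groups[quiet], axis=1, keepdims=True)
    tail = wave[n_full:]                                    # remainder group
    if tail.size > 1 and np.std(tail, ddof=1) < judge:
        tail[:] = np.mean(tail)
    return wave

print(denoise([10.0, 10.1, 9.9, 10.0, 50.0, 10.0, 10.2, 10.0]))
```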
WeiChe-Huang/ONNX_Convertor
|
[
"7ba4fe3fd9f606d39cf61b46080c3dc244dfe207"
] |
[
"optimizer_scripts/tools/removing_transpose.py"
] |
[
"from . import helper\nfrom . import other\nfrom . import modhelper\nfrom . import fusing\nimport numpy as np\nimport onnx\nimport onnx.utils\n\ndef eliminate_transposes(m):\n g = m.graph\n keep_eliminating = True\n while keep_eliminating:\n while swap_transpose_with_single_next_node(g):\n pass\n splitted = split_transpose_for_multiple_next_nodes(g)\n annihilated = annihilate_transposes(g)\n multiple_trans_swapped = swap_multiple_transposes_with_node(g)\n keep_eliminating = splitted or annihilated or multiple_trans_swapped\n\n if keep_eliminating:\n m = onnx.utils.polish_model(m)\n g = m.graph\n \n return m\n\n\ndef swap_transpose_with_single_next_node(g):\n swapped = False\n passable_nodes = set(['Relu', 'Neg', 'LeakyRelu', 'Sqrt', 'Reciprocal', 'Add', 'Mul', 'Tanh'])\n for node in g.node:\n trans_node = node\n if trans_node.op_type != 'Transpose':\n continue\n next_nodes = helper.find_nodes_by_input_name(g, trans_node.output[0])\n if len(next_nodes) != 1:\n continue\n next_node = next_nodes[0]\n if next_node.op_type not in passable_nodes:\n continue\n \n input_nodes = [helper.find_node_by_output_name(g, input_name) for input_name in next_node.input]\n \n nonconstant_input = False\n for input_node in input_nodes:\n if input_node == None:\n nonconstant_input = True\n break\n if input_node.name == trans_node.name:\n continue\n elif input_node.op_type == 'Constant':\n continue\n else:\n nonconstant_input = True\n break\n if nonconstant_input:\n continue\n \n for input_node in input_nodes:\n if input_node.name == trans_node.name:\n # if the input is just the transpose node\n next_value_info = helper.find_value_by_name(g, next_node.output[0])\n mid_value_info = helper.find_value_by_name(g, trans_node.output[0])\n\n output_nodes = helper.find_nodes_by_input_name(g, next_node.output[0])\n for out_node in output_nodes:\n modhelper.replace_node_input(out_node, next_node.output[0], trans_node.name)\n\n next_node.input[0] = trans_node.input[0]\n next_node.output[0] = next_node.name\n trans_node.input[0] = next_node.name\n trans_node.output[0] = trans_node.name\n\n if next_value_info:\n next_value_info.name = trans_node.name\n if mid_value_info:\n g.value_info.remove(mid_value_info)\n else:\n # if the input is a constant node\n old_tensor = input_node.attribute[0].t\n old_shape, data = helper.constant_to_list(input_node)\n permutation = list(trans_node.attribute[0].ints)\n while len(old_shape) < len(permutation):\n old_shape.insert(0, 1)\n np_data = np.reshape(data, old_shape)\n reverse_perm = []\n for i in range(len(permutation)):\n reverse_perm.append(permutation.index(i))\n np_data = np.transpose(np_data, reverse_perm)\n new_shape = np_data.shape\n new_tensor = onnx.helper.make_tensor(\n name=old_tensor.name,\n data_type=old_tensor.data_type,\n dims=new_shape, \n vals=np_data.flatten().tolist()\n )\n new_node = onnx.helper.make_node(\n 'Constant',\n [],\n [input_node.output[0]],\n name=input_node.name,\n value=new_tensor\n )\n g.node.extend([new_node])\n \n g.value_info.remove(helper.find_value_by_name(g, input_node.output[0]))\n g.node.remove(input_node)\n \n swapped = True\n \n other.topological_sort(g)\n return swapped\n\n\ndef swap_multiple_transposes_with_node(g):\n # here only consider same input transposes\n swapped = False\n passable_nodes = set(['Add', 'Mul'])\n node_to_del = []\n for node in g.node: \n if node.op_type not in passable_nodes:\n continue\n input_nodes = [helper.find_node_by_output_name(g, input_name) for input_name in node.input]\n if any([input_node == None for input_node 
in input_nodes]):\n continue\n if any([input_node.op_type != 'Transpose' for input_node in input_nodes]):\n continue\n\n permutation = list(input_nodes[0].attribute[0].ints)\n if any([list(input_node.attribute[0].ints) != permutation for input_node in input_nodes]):\n continue\n \n for input_name in node.input:\n input_node = helper.find_node_by_output_name(g, input_name)\n modhelper.replace_node_input(node, input_name, input_node.input[0]) \n\n node_to_del.extend(input_nodes)\n for input_node in input_nodes:\n input_val_info = helper.find_value_by_name(g, input_node.output[0])\n if input_val_info is not None:\n g.value_info.remove(input_val_info)\n output_val_info = helper.find_value_by_name(g, node.output[0])\n if output_val_info is not None:\n g.value_info.remove(output_val_info)\n\n output_nodes = helper.find_nodes_by_input_name(g, node.output[0])\n for i in range(len(output_nodes)):\n new_trans_node_name = node.name+'_trans_'+str(i)\n new_trans_node = onnx.helper.make_node(\n 'Transpose',\n [node.output[0]],\n [new_trans_node_name],\n name=new_trans_node_name,\n perm=permutation\n )\n modhelper.replace_node_input(output_nodes[i], node.output[0], new_trans_node_name)\n \n g.node.extend([new_trans_node])\n \n swapped = True \n \n while node_to_del:\n node = node_to_del.pop()\n g.node.remove(node)\n \n other.topological_sort(g)\n return swapped\n\n\ndef annihilate_transposes(g):\n node_to_del = []\n annihilated = False\n for node in g.node:\n if node.op_type != 'Transpose':\n continue\n pre_node = helper.find_node_by_output_name(g, node.input[0])\n if not pre_node or pre_node.op_type != 'Transpose':\n continue\n nodes_from_top_transpose = helper.find_nodes_by_input_name(g, pre_node.output[0])\n if len(nodes_from_top_transpose) > 1:\n continue\n \n perm_1 = list(pre_node.attribute[0].ints)\n perm_2 = list(node.attribute[0].ints)\n if perm_1 != perm_2:\n continue\n\n out_nodes = helper.find_nodes_by_input_name(g, node.output[0])\n for out_node in out_nodes:\n modhelper.replace_node_input(out_node, node.output[0], pre_node.input[0])\n \n node_to_del.extend([node, pre_node])\n mid_value_info = helper.find_value_by_name(g, pre_node.output[0])\n out_value_info = helper.find_value_by_name(g, node.output[0])\n g.value_info.remove(mid_value_info)\n g.value_info.remove(out_value_info)\n\n annihilated = True\n while node_to_del:\n node = node_to_del.pop()\n g.node.remove(node)\n \n return annihilated\n\n\ndef split_transpose_for_multiple_next_nodes(g):\n splitted = False\n node_to_del = []\n for node in g.node:\n if node.op_type != 'Transpose':\n continue\n output_nodes = helper.find_nodes_by_input_name(g, node.output[0])\n if len(output_nodes) < 2:\n continue\n for i in range(len(output_nodes)):\n output_node = output_nodes[i]\n new_trans_node_name = node.name + '_' + str(i)\n new_trans_node = onnx.helper.make_node(\n 'Transpose',\n [node.input[0]],\n [new_trans_node_name],\n name=new_trans_node_name,\n perm=list(node.attribute[0].ints)\n )\n modhelper.replace_node_input(output_node, node.output[0], new_trans_node.output[0])\n g.node.extend([new_trans_node])\n \n node_to_del.append(node)\n val_info = helper.find_value_by_name(g, node.output[0])\n g.value_info.remove(val_info)\n\n splitted = True\n \n while node_to_del:\n node = node_to_del.pop()\n g.node.remove(node)\n \n other.topological_sort(g)\n return splitted\n\ndef remove_trivial_transpose(g):\n node_to_del = []\n for node in g.node:\n if node.op_type != 'Transpose':\n continue\n permutation = list(node.attribute[0].ints)\n if permutation 
!= list(range(len(permutation))):\n continue\n \n next_nodes = helper.find_nodes_by_input_name(g, node.output[0])\n if not next_nodes:\n input_val_info = helper.find_value_by_name(g, node.input[0])\n out_val_info = helper.find_output_by_name(g, node.output[0])\n if not input_val_info:\n input_val_info = helper.find_input_by_name(g, node.input[0])\n g.output.remove(out_val_info)\n g.output.extend([input_val_info])\n else:\n out_val_info = helper.find_value_by_name(g, node.output[0])\n for next_node in next_nodes:\n modhelper.replace_node_input(next_node, node.output[0], node.input[0])\n g.value_info.remove(out_val_info)\n \n node_to_del.append(node)\n \n while node_to_del:\n node = node_to_del.pop()\n g.node.remove(node)\n \n other.topological_sort(g)\n\ndef fuse_Transpose_into_Gemm_weight(g):\n node_to_del = []\n for node in g.node:\n # Check pattern\n if node.op_type != 'Gemm':\n continue\n prev_node = helper.find_node_by_output_name(g, node.input[0])\n if prev_node.op_type != 'Flatten':\n continue\n transpose_node = helper.find_node_by_output_name(g, prev_node.input[0])\n if transpose_node.op_type != 'Transpose':\n continue\n # Check attribute\n perm = helper.get_list_attribute_by_name(transpose_node, 'perm', 'int')\n if perm != [0, 2, 3, 1]:\n continue\n transB = helper.get_var_attribute_by_name(node, 'transB', 'int')\n if transB is not None and transB == 1:\n continue\n # Get the original weight\n origin_weight = helper.find_node_by_output_name(g, node.input[1])\n origin_np = helper.constant_to_numpy(origin_weight)\n # Calculate a new weight\n shape = helper.get_shape_from_value_info(helper.find_value_by_name(g, prev_node.input[0]))\n shape.append(-1)\n new_np = np.reshape(origin_np, shape)\n new_np = np.transpose(new_np, [0, 3, 1, 2, 4])\n new_np = np.reshape(new_np, [-1, new_np.shape[-1]])\n new_weight = helper.numpy_to_constant(origin_weight.output[0], new_np)\n # Replace and eliminate\n prev_node.input[0] = transpose_node.input[0]\n node_to_del.append(transpose_node)\n node_to_del.append(origin_weight)\n g.value_info.remove(helper.find_value_by_name(g, transpose_node.output[0]))\n g.node.extend([new_weight])\n\n while node_to_del:\n node = node_to_del.pop()\n g.node.remove(node)\n\n other.topological_sort(g)\n"
] |
[
[
"numpy.reshape",
"numpy.transpose"
]
] |
jirikraus/cuml
|
[
"1176b338e2dc97737977336d7d67a733c9dd4626"
] |
[
"python/cuML/test/test_dbscan.py"
] |
[
"# Copyright (c) 2018, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport pytest\nfrom cuml import DBSCAN as cuDBSCAN\nfrom sklearn.cluster import DBSCAN as skDBSCAN\nimport cudf\nimport numpy as np\n\n\[email protected]('datatype', [np.float32, np.float64])\[email protected]('input_type', ['dataframe', 'ndarray'])\ndef test_dbscan_predict(datatype, input_type):\n\n X = np.array([[1, 2], [2, 2], [2, 3], [8, 7], [8, 8], [25, 80]],\n dtype=datatype)\n skdbscan = skDBSCAN(eps=3, min_samples=2)\n sk_labels = skdbscan.fit_predict(X)\n\n cudbscan = cuDBSCAN(eps=3, min_samples=2)\n\n if input_type == 'dataframe':\n gdf = cudf.DataFrame()\n gdf['0'] = np.asarray([1, 2, 2, 8, 8, 25], dtype=datatype)\n gdf['1'] = np.asarray([2, 2, 3, 7, 8, 80], dtype=datatype)\n cu_labels = cudbscan.fit_predict(gdf)\n else:\n cu_labels = cudbscan.fit_predict(X)\n\n for i in range(X.shape[0]):\n assert cu_labels[i] == sk_labels[i]\n\n\[email protected]('datatype', [np.float32, np.float64])\ndef test_dbscan_predict_numpy(datatype):\n gdf = cudf.DataFrame()\n gdf['0'] = np.asarray([1, 2, 2, 8, 8, 25], dtype=datatype)\n gdf['1'] = np.asarray([2, 2, 3, 7, 8, 80], dtype=datatype)\n\n X = np.array([[1, 2], [2, 2], [2, 3], [8, 7], [8, 8], [25, 80]],\n dtype=datatype)\n\n print(\"Calling fit_predict\")\n cudbscan = cuDBSCAN(eps = 3, min_samples = 2)\n cu_labels = cudbscan.fit_predict(gdf)\n skdbscan = skDBSCAN(eps = 3, min_samples = 2)\n sk_labels = skdbscan.fit_predict(X)\n print(X.shape[0])\n for i in range(X.shape[0]):\n assert cu_labels[i] == sk_labels[i]\n"
] |
[
[
"numpy.asarray",
"numpy.array",
"sklearn.cluster.DBSCAN"
]
] |
denix56/fcd
|
[
"6f71e311acb0e02b6d9e106d8bac754e883f52bf"
] |
[
"prepare_landsat8_biome.py"
] |
[
"import argparse\nimport random\nfrom collections import namedtuple, defaultdict\nfrom itertools import product\nfrom pathlib import Path\nfrom typing import List\n\nimport cv2\nimport numpy as np\nimport rasterio\nimport tifffile\nfrom PIL import Image\nfrom matplotlib import pyplot as plt\nfrom tqdm import tqdm\nimport h5py\nimport joblib\nfrom joblib import Parallel, delayed\nimport contextlib\nfrom multiprocessing import Value\n\n\[email protected]\ndef tqdm_joblib(tqdm_object):\n \"\"\"Context manager to patch joblib to report into tqdm progress bar given as argument\"\"\"\n class TqdmBatchCompletionCallback(joblib.parallel.BatchCompletionCallBack):\n def __call__(self, *args, **kwargs):\n tqdm_object.update(n=self.batch_size)\n return super().__call__(*args, **kwargs)\n\n old_batch_callback = joblib.parallel.BatchCompletionCallBack\n joblib.parallel.BatchCompletionCallBack = TqdmBatchCompletionCallback\n try:\n yield tqdm_object\n finally:\n joblib.parallel.BatchCompletionCallBack = old_batch_callback\n tqdm_object.close()\n\n\nBANDS_30M = [\n 'B1', # coastal/aerosol\n 'B2', # blue\n 'B3', # green\n 'B4', # red\n 'B5', # nir\n 'B6', # swir1\n 'B7', # swir2\n 'B9', # cirrus\n 'B10', # tir1\n 'B11' # tir2\n]\nBANDS_15M = [\n # 'B8' # panchromatic\n]\n\nLandsat8Image = namedtuple('Landsat8Image', 'name, biome, bands_15m, bands_30m, manual_cloud_mask qa_cloud_mask')\n\n\ndef prepare_patches(config):\n data_path = Path(config.data_path)\n output_path = Path(config.output_dir)\n patch_size = config.patch_size\n\n output_path.mkdir(exist_ok=True, parents=True)\n for split in [\"train\", \"val\", \"test\"]:\n (output_path / split).mkdir(exist_ok=True, parents=True)\n\n thumbnail_dir = output_path / 'thumbnails'\n thumbnail_dir.mkdir(exist_ok=True)\n\n images = get_landsat8_images(data_path)\n\n # Compute 3:1:2 train/val/test split, and save chosen assignment to file.\n train_val_test = split_train_val_test(images, val_ratio=2 / 12, test_ratio=4 / 12, seed=config.seed)\n\n with open(output_path / 'assignment.txt', mode='w') as f:\n for idx, split in enumerate(train_val_test):\n x = images[idx]\n line = \"{},{},{}\\n\".format(split, x.biome, x.name)\n f.write(line)\n \n train_size = 0\n val_size = 0\n \n print('Computing file sizes...')\n \n with tqdm_joblib(tqdm(desc='Reading L8 tile (1st run)', total=len(images))) as progress_bar:\n def work_func(img_idx, image):\n split = train_val_test[img_idx]\n x, mask = read_image(image)\n assert x.dtype == np.uint16\n \n height, width, _ = x.shape\n patches = list(product(range(0, patch_size * (height // patch_size), patch_size),\n range(0, patch_size * (width // patch_size), patch_size)))\n\n if split == 'test':\n return 0, 0 # use raw images for testing instead of patches\n \n count = 0\n \n for row, col in patches:\n #patch_x = x[row:row + patch_size, col:col + patch_size]\n patch_mask = mask[row:row + patch_size, col:col + patch_size]\n if (patch_mask == 0).all(): # ignore completely invalid patches\n continue\n \n count += 1\n \n train_count = 0\n val_count = 0\n \n if split == 'train':\n train_count = count\n else:\n val_count = count\n return train_count, val_count\n \n result = np.array(Parallel(n_jobs=config.num_workers)(delayed(work_func)(img_idx, image) for img_idx, image in enumerate(images)))\n train_size, val_size = zip(*result)\n train_size = np.sum(train_size)\n val_size = np.sum(val_size)\n \n h5file = 'l8biome.h5'\n \n print('Train patches: {}, validation patches: {}'.format(train_size, val_size))\n \n # in train part\n \n with 
h5py.File(h5file, 'w') as h5f:\n h5f.attrs.create('classes', ['clear', 'cloudy'])\n \n ds = {}\n for split, size in zip(['train', 'val'], [train_size, val_size]):\n grp = h5f.create_group(split)\n ds[split] = {\n 'images': grp.create_dataset('images', shape=(size, patch_size, patch_size, 10), dtype=np.uint16),\n 'masks': grp.create_dataset('masks', shape=(size, patch_size, patch_size), dtype=np.uint8),\n 'labels': grp.create_dataset('labels', shape=(size,), dtype=np.uint8),\n 'counter_processed': 0\n }\n \n #with tqdm_joblib(tqdm(desc='Reading L8 tile (2nd run)', total=len(images))) as progress_bar:\n def work_func(img_idx, image):\n split = train_val_test[img_idx]\n x, mask = read_image(image)\n assert x.dtype == np.uint16\n\n height, width, _ = x.shape\n patches = list(product(range(0, patch_size * (height // patch_size), patch_size),\n range(0, patch_size * (width // patch_size), patch_size)))\n\n # Create thumbnail of full image for debugging\n thumbnail = np.clip(1.5 * (x[..., [3, 2, 1]].copy() >> 8), 0, 255).astype(np.uint8)\n thumbnail = cv2.resize(thumbnail, (1000, 1000))\n Image.fromarray(thumbnail).save(\n str(thumbnail_dir / '{}_thumbnail_{}_{}.jpg'.format(split, image.biome, image.name)))\n\n if split == 'test':\n return 0, 0 # use raw images for testing instead of patches\n \n num_cloudy = 0\n num_clear = 0\n\n for row, col in patches:\n patch_x = x[row:row + patch_size, col:col + patch_size]\n patch_mask = mask[row:row + patch_size, col:col + patch_size]\n if (patch_mask == 0).all(): # ignore completely invalid patches\n continue\n\n label = 1 if (patch_mask == 2).any() else 0\n \n if split == 'train':\n if label == 1:\n num_cloudy += 1\n else:\n num_clear += 1\n \n ds_idx = ds[split]['counter_processed']\n ds[split]['images'][ds_idx] = patch_x\n ds[split]['masks'][ds_idx] = patch_mask\n ds[split]['labels'][ds_idx] = label\n \n ds[split]['counter_processed'] += 1\n \n return num_cloudy, num_clear\n \n result = [work_func(img_idx, image) for img_idx, image in tqdm(enumerate(images), total=len(images))]\n\n num_cloudy, num_clear = zip(*result)\n num_cloudy = np.sum(num_cloudy)\n num_clear = np.sum(num_clear)\n\n print('Done. 
Class balance in train: {} cloudy, {} clear'.format(num_cloudy, num_clear))\n\n\ndef split_train_val_test(l8_images: List[Landsat8Image], val_ratio=1 / 10, test_ratio=1 / 10, seed=None):\n # Split images randomly so that each partition contains same number of images from each biome.\n assert val_ratio + test_ratio < 1.0\n\n biome_to_idxs = defaultdict(list)\n for idx, l8_image in enumerate(l8_images):\n biome_to_idxs[l8_image.biome].append(idx)\n\n unique_biomes = biome_to_idxs.keys()\n train_val_test = [None] * len(l8_images)\n\n if seed is not None:\n np.random.seed(seed)\n seeds = np.random.random_sample(len(unique_biomes))\n else:\n seeds = None\n\n for biome_idx, biome in enumerate(unique_biomes):\n num_tiles = len([x for x in l8_images if x.biome == biome])\n val = [\"val\"] * int(val_ratio * num_tiles)\n test = [\"test\"] * int(test_ratio * num_tiles)\n train = [\"train\"] * (num_tiles - (len(val) + len(test)))\n biome_train_val_test = train + val + test\n\n if seeds is not None:\n random.seed(seeds[biome_idx])\n random.shuffle(biome_train_val_test)\n\n for local_idx, global_idx in enumerate(biome_to_idxs[biome]):\n train_val_test[global_idx] = biome_train_val_test[local_idx]\n\n assert len(train_val_test) == len(l8_images)\n return train_val_test\n\n\ndef get_landsat8_images(data_path):\n def band_name(x):\n return str(x).split(\"_\")[-1].replace(\".TIF\", \"\")\n\n landsat8_data = []\n image_dirs = list(data_path.glob('*/*')) # <biome>/<image_name>\n for image_dir in image_dirs:\n biome = image_dir.parts[-2]\n name = image_dir.parts[-1]\n\n bands = list(image_dir.glob(\"**/*.TIF\"))\n bands = dict(map(lambda x: (band_name(x), x), bands))\n\n manual_mask = next(image_dir.glob('**/*.img'))\n qa_mask = bands['BQA']\n\n landsat8_data.append(\n Landsat8Image(\n name=name,\n biome=biome,\n bands_15m=[bands[x] for x in BANDS_15M],\n bands_30m=[bands[x] for x in BANDS_30M],\n manual_cloud_mask=manual_mask,\n qa_cloud_mask=qa_mask\n )\n )\n\n return landsat8_data\n\n\ndef visualize_example_rgb(image, mask=None, num_classes=3):\n if image.dtype == np.uint16:\n image = np.clip(((image / (2 ** 16 - 1)).astype(np.float32) * 2.5), 0, 1)\n if mask is not None:\n f, axes = plt.subplots(1, 2, figsize=(8, 8))\n ax = axes[0]\n ax.imshow(image)\n ax.set_title('Image')\n ax.axis('off')\n\n ax = axes[1]\n ax.imshow(mask, vmin=0, vmax=num_classes)\n ax.set_title('Ground Truth')\n ax.axis('off')\n else:\n plt.figure(figsize=(8, 8))\n plt.imshow(image)\n plt.show()\n\n\ndef read_image(image: Landsat8Image, return_profile=False):\n bands = []\n for band_path in image.bands_30m:\n with rasterio.open(band_path) as f:\n band = f.read()\n bands.append(band)\n profile = f.profile\n x = np.concatenate(bands, axis=0)\n x = np.moveaxis(x, 0, -1)\n\n mask = get_ground_truth(image)\n\n if return_profile:\n return x, mask, profile\n else:\n return x, mask\n\n\ndef get_ground_truth(image: Landsat8Image):\n with rasterio.open(image.manual_cloud_mask) as f:\n mask = f.read().squeeze()\n mask[mask == 0] = 0 # none\n mask[mask == 128] = 1 # Background\n mask[np.logical_or(mask == 192, mask == 255)] = 2 # thin cloud, cloud\n mask[mask == 64] = 1 # Set cloud shadow as background\n return mask\n\n\ndef write_generated_masks(config):\n data_path = Path(config.data_path)\n output_path = Path(config.output_dir)\n patch_size = config.patch_size\n\n tifs_dir = 'outputs/FixedPointGAN_1/results/tifs'\n images = get_landsat8_images(data_path)\n\n with open(output_path / 'assignment.txt') as f:\n train_val_test = [x.split(',')[0] for 
x in f.read().splitlines()]\n\n patch_ids = {'train': 0, 'val': 0, 'test': 0}\n for img_idx, image in enumerate(tqdm(images, desc='Writing generated masks')):\n split = train_val_test[img_idx]\n split_dir = output_path / split\n if split != 'train':\n continue # we use real labels for evaluation\n generated_mask = tifffile.imread('{}/{}_{}_mask.tif'.format(tifs_dir, image.biome, image.name))\n generated_mask[generated_mask == 0] = 0 # none\n generated_mask[generated_mask == 128] = 1 # Background\n generated_mask[generated_mask == 255] = 2 # cloud\n ground_truth_mask = get_ground_truth(image)\n\n height, width = generated_mask.shape\n patches = list(product(range(0, patch_size * (height // patch_size), patch_size),\n range(0, patch_size * (width // patch_size), patch_size)))\n\n for row, col in patches:\n patch_gt_mask = ground_truth_mask[row:row + patch_size, col:col + patch_size]\n patch_gen_mask = generated_mask[row:row + patch_size, col:col + patch_size]\n if (patch_gt_mask == 0).all(): # ignore patches with only invalid pixels\n continue\n\n label = 'cloudy' if (patch_gt_mask == 2).any() else 'clear'\n\n # If the image-level label is clear, we know the patch contains no clouds. In this case, we can ignore\n # the generated mask, and set the mask as all clear, reducing false positives.\n if label == 'clear':\n patch_gen_mask[patch_gen_mask == 2] = 1\n\n patch_dir = split_dir / label / 'patch_{}'.format(patch_ids[split])\n patch_dir.mkdir(exist_ok=True, parents=True)\n tifffile.imsave(str(patch_dir / \"generated_mask.tif\"), patch_gen_mask)\n patch_ids[split] += 1\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_path', type=str, default='landsat8-biome', help='Path to downloaded dataset')\n parser.add_argument('--patch_size', type=int, default=128, help='Patch size to divide images into')\n parser.add_argument('--output_dir', type=str, default='data/L8Biome')\n parser.add_argument('--seed', type=int, default=1337, help='Random seed used to split dataset')\n parser.add_argument('--generated_masks', type=str, default=None, help='Write GAN produced cloud masks to data dir.'\n 'Dir should point to tifs produced by '\n 'evaluate.py, for example '\n 'outputs/FixedPointGAN_1/results/tifs')\n parser.add_argument('--num_workers', type=int, default=-1, help='Number of workers')\n config = parser.parse_args()\n if config.generated_masks is not None:\n write_generated_masks(config)\n else:\n prepare_patches(config)\n"
] |
[
[
"matplotlib.pyplot.imshow",
"numpy.random.seed",
"matplotlib.pyplot.subplots",
"numpy.concatenate",
"numpy.logical_or",
"numpy.moveaxis",
"matplotlib.pyplot.show",
"numpy.sum",
"matplotlib.pyplot.figure"
]
] |
sguzman/timescaleable
|
[
"cbaea85ed512ebb8330a20e37b1ee53d227285fc"
] |
[
"src/main.py"
] |
[
"import numpy\nimport requests\nimport json\nimport psycopg2\nimport os\nimport random\nimport sys\nimport threading\nimport datetime\nimport traceback\nimport queue\nimport asyncio\n\n\nkeys = os.environ['API_KEY'].split('|')\ncores = 1\ntimescale_queue = queue.Queue()\nchunk_size = 50\n\n\ndef connect():\n return psycopg2.connect(user='admin', password='admin', host='192.168.1.63', port='5432', database='youtube')\n\n\ndef start_sql_service():\n conn = connect()\n\n insert_sql = f'INSERT INTO youtube.timeseries.subscriptions (chan_id, subs) VALUES (%s, %s)'\n while True:\n subs, ids, api_key = timescale_queue.get(block=True)\n assert(len(subs) == len(ids))\n cursor = conn.cursor()\n for data in zip(ids, subs):\n cursor.execute(insert_sql, data)\n conn.commit()\n cursor.close()\n print('Insert at', datetime.datetime.now(), len(subs), api_key)\n\n\ndef api_request(channels):\n key = random.choice(keys)\n chans = [list(x.keys())[0] for x in channels]\n serial_id = {}\n for c in channels:\n serial_id[list(c.keys())[0]] = list(c.values())[0]\n\n url = 'https://www.googleapis.com/youtube/v3/channels'\n params = {\n 'part': 'snippet,statistics',\n 'id': ','.join(chans),\n 'key': key\n }\n\n req = requests.get(url, params=params)\n json_body = json.loads(req.text)\n if 'items' not in json_body:\n return None\n\n ids = []\n for item in json_body['items']:\n ids.append(serial_id[item['id']])\n\n return json_body, ids, key\n\n\ndef extract_stats(json_body):\n stats_result = json_body['items']\n\n stats_body = []\n for s in stats_result:\n stats_tmp = s['statistics']\n stats_body.append(int(stats_tmp['subscriberCount']))\n\n return stats_body\n\n\ndef query_channels():\n conn = connect()\n sql = f'SELECT chan_serial, subs, id FROM youtube.entities.chans ORDER BY subs DESC'\n cursor = conn.cursor()\n cursor.execute(sql)\n records = [x for x in cursor.fetchall()]\n\n cursor.close()\n conn.close()\n\n return records\n\n\ndef weighted_distro(chans):\n channels = [{c[0]: c[-1]} for c in chans]\n subs = [c[1] for c in chans]\n total_sum = sum(subs)\n\n weights = [s / total_sum for s in subs]\n return channels, weights\n\n\ndef get_sample(distro, n):\n chans = distro[0]\n weights = distro[1]\n return [numpy.random.choice(chans, p=weights) for x in range(n)]\n\n\nasync def parse_request(distro):\n try:\n sample = get_sample(distro, chunk_size)\n result = api_request(sample)\n if result is None:\n return\n\n json_body, ids, key = result\n\n stats = extract_stats(json_body)\n timescale_queue.put((stats, ids, key))\n except Exception as e:\n print(e, file=sys.stderr)\n traceback.print_exc()\n\n\ndef async_wrapper(distro):\n asyncio.run(parse_request(distro))\n\n\ndef main():\n threading.Thread(target=start_sql_service, daemon=True).start()\n\n chans_weight = query_channels()\n distro = weighted_distro(chans_weight)\n\n def func():\n while True:\n async_wrapper(distro)\n\n for i in range(cores):\n threading.Thread(target=func).start()\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"numpy.random.choice"
]
] |
VCL3D/DronePose
|
[
"c38a45270105ec2fd873540f384e070e5fc9c204"
] |
[
"importers/intrinsics.py"
] |
[
"import json\r\nimport numpy\r\nimport torch\r\n\r\n\r\ndef load_intrinsics_repository(filename): \r\n #global intrinsics_dict\r\n with open(filename, 'r') as json_file:\r\n intrinsics_repository = json.load(json_file)\r\n intrinsics_dict = dict((intrinsics['Device'], \\\r\n intrinsics['Depth Intrinsics'][0]['1280x720'])\\\r\n for intrinsics in intrinsics_repository)\r\n return intrinsics_dict\r\n\r\ndef get_intrinsics(name, intrinsics_dict, scale=1, data_type=torch.float32):\r\n #global intrinsics_dict\r\n if intrinsics_dict is not None:\r\n intrinsics_data = numpy.array(intrinsics_dict[name])\r\n intrinsics = torch.tensor(intrinsics_data).reshape(3, 3).type(data_type) \r\n intrinsics[0, 0] = intrinsics[0, 0] / scale\r\n intrinsics[0, 2] = intrinsics[0, 2] / scale\r\n intrinsics[1, 1] = intrinsics[1, 1] / scale\r\n intrinsics[1, 2] = intrinsics[1, 2] / scale\r\n intrinsics_inv = intrinsics.inverse()\r\n return intrinsics, intrinsics_inv\r\n raise ValueError(\"Intrinsics repository is empty\")\r\n\r\ndef cameraMatrix(width,height,FOV = 85,n = 0.1,f = 8, AspectRatio = 4./3.):\r\n #get camera intrinsics\r\n t = math.tan(FOV * math.pi/(2 * 180))\r\n f = width/(2 * t)\r\n r = t * AspectRatio\r\n projection_mat = np.array([[f,0,width/2],[0, height/ ( t * AspectRatio),height/2],[0,0,1]])\r\n projection_mat = torch.from_numpy(projection_mat).cuda()\r\n projection_mat = projection_mat.type(torch.float32)\r\n return projection_mat, projection_mat.inverse() \r\n "
] |
[
[
"numpy.array",
"torch.from_numpy",
"torch.tensor"
]
] |
timokau/task-placement
|
[
"41d02ac6f27eec005da90cfd1fd699a2127d4704"
] |
[
"peg_size_experiment.py"
] |
[
"\"\"\"Empirically determine PEG size as a function of input size\"\"\"\n\nimport csv\nimport numpy as np\nfrom scipy import stats\n\nimport baseline_agent\nfrom generator import Generator\nfrom hyperparameters import GENERATOR_DEFAULTS\n\n\ndef _play_episode(emb):\n emb = emb.reset()\n enodes = [len(emb.graph.nodes())]\n edges = [len(emb.graph.edges(keys=True))]\n choices = [len(emb.possibilities())]\n while len(emb.possibilities()) > 0:\n action = baseline_agent.act(emb, randomness=0, rand=np.random)\n emb.take_action(*action)\n enodes.append(len(emb.graph.nodes()))\n edges.append(len(emb.graph.edges(keys=True)))\n choices.append(len(emb.possibilities()))\n return (enodes, edges, choices)\n\n\ndef _main(dirname):\n args = GENERATOR_DEFAULTS.copy()\n args[\"num_sources_dist\"] = lambda r: 1\n rng = np.random.RandomState(1)\n for blocks in [2, 3, 4]:\n filename = f\"{dirname}/peg_edges_b{blocks}.csv\"\n with open(filename, \"w\") as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow((\"nodes\", \"edges\", \"sem\"))\n for nodes in range(2, 56 + 1, 4):\n # This is intentional.\n # pylint: disable=cell-var-from-loop\n args[\"interm_nodes_dist\"] = lambda r: nodes - 2\n args[\"interm_blocks_dist\"] = lambda r: blocks - 2\n gen = Generator(**args)\n all_edges = []\n for _experiment in range(100):\n embedding = gen.random_embedding(rng)\n # n = len(embedding.infra.nodes())\n # b = len(embedding.overlay.blocks())\n # l = len(embedding.overlay.links())\n # edge_bound = n * (n - 1) * l + 2 * n * l * n + l * n * n\n # enode_bound = n * b + n * l\n (_enodes, edges, _choices) = _play_episode(embedding)\n all_edges.extend(edges)\n print(f\"n{nodes}b{blocks}:\", round(np.average(all_edges)))\n writer.writerow(\n (nodes, np.average(all_edges), stats.sem(all_edges))\n )\n\n\nif __name__ == \"__main__\":\n import sys\n\n _main(sys.argv[1])\n"
] |
[
[
"scipy.stats.sem",
"numpy.random.RandomState",
"numpy.average"
]
] |
jfmendozam/ontotoutra
|
[
"bea4ceafa62500b23495a6de120884ca40f785e9"
] |
[
"webscraping/dataFrame.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 6 00:20:39 2018\n\n@author: jf\n\"\"\"\n\nimport glob\nimport os\nfrom pandas import pandas as pd\nimport matplotlib.pyplot as plt\nimport gensim\nfrom gensim import corpora, models, similarities\n#import gensim.models.doc2vec as d2v\n#from sklearn.linear_model import SGDClassifier\n\nfrom pygeocoder import Geocoder\n\nfrom Country import Country\nfrom Sentiment import Sentiment\n#from pygeocoder import Geocoder\n#import cartopy.crs as ccrs\n\ndf = pd.DataFrame()\npath = r'/home/jf/Documentos/ontologies/web scrapping/workspace/WebScrapping/webscrapping/boyaca_en'\npath = r'/home/jf/Documentos/ontologies/web scrapping/workspace/WebScrapping/webscrapping/colombia_es'\npath = r'/home/jf/Documentos/ontologies/web scrapping/workspace/WebScrapping/webscrapping/colombia_en'\nallFiles = glob.glob(os.path.join(path, \"*.csv\"))\ndf = pd.concat((pd.read_csv(f) for f in allFiles), ignore_index = True)\n\ntmp = Country()\ntmp.df = df\ntmp.toEnglish('country')\ntmp.addCountryAbbr('country')\ndf = tmp.df\n\ntmp = Sentiment()\ntmp.df = df\ntmp.addSentiment('comment')\ndf = tmp.df\n\nmodel = gensim.models.Word2Vec(tmp.corpus, min_count = 1, size = 32)\n\n#tokenCorpus = [nltk.word_tokenize(sent.encode('utf-8')) for sent in df['comment']]\n\n\n#print (df['country'].value_counts().nlargest(20))\n#print (df['score'].groupby(df['country']).value_counts())\n#print (df.groupby(['country', 'score']).country.value_counts().nlargest(15))\n\n\n\n\n\n#plt.barh(df['country'], df['score'], align='center')\n\n#plt.figure(figsize = (10, 3))\n#df['country'].value_counts().plot(kind = 'barh')\n \n\n#location = str('Bogota' + ',' + 'Colombia')\n#result = Geocoder.geocode(location)\n#coords = str(result[0].coordinates)\n#lat = float(coords.split(',')[0].split('(')[1])\n#lon = float(coords.split(',')[1].split(')')[0])\n"
] |
[
[
"pandas.pandas.read_csv",
"pandas.pandas.DataFrame"
]
] |
HaxiSnake/skeleton_frame
|
[
"37954a01d75c8485bfc93bca749dc5c23747d8f3"
] |
[
"processor/recognition.py"
] |
[
"#!/usr/bin/env python\n# pylint: disable=W0201\nimport sys\nimport argparse\nimport yaml\nimport numpy as np\n\n# torch\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\n\n# torchlight\n# import torchlight.torchlight as torchlight\n# from torchlight.torchlight import str2bool\n# from torchlight.torchlight import DictAction\n# from torchlight.torchlight import import_class\n\nimport torchlight \nfrom torchlight import str2bool\nfrom torchlight import DictAction\nfrom torchlight import import_class\n\nfrom .processor import Processor\n\ndef weights_init(m):\n classname = m.__class__.__name__\n if classname.find('Conv1d') != -1:\n m.weight.data.normal_(0.0, 0.02)\n if m.bias is not None:\n m.bias.data.fill_(0)\n elif classname.find('Conv2d') != -1:\n m.weight.data.normal_(0.0, 0.02)\n if m.bias is not None:\n m.bias.data.fill_(0)\n elif classname.find('BatchNorm') != -1:\n m.weight.data.normal_(1.0, 0.02)\n m.bias.data.fill_(0)\n\nclass REC_Processor(Processor):\n \"\"\"\n Processor for Skeleton-based Action Recgnition\n \"\"\"\n\n def load_model(self):\n self.model = self.io.load_model(self.arg.model,\n **(self.arg.model_args))\n self.model.apply(weights_init)\n self.loss = nn.CrossEntropyLoss()\n \n def load_optimizer(self):\n if self.arg.optimizer == 'SGD':\n self.optimizer = optim.SGD(\n self.model.parameters(),\n lr=self.arg.base_lr,\n momentum=0.9,\n nesterov=self.arg.nesterov,\n weight_decay=self.arg.weight_decay)\n elif self.arg.optimizer == 'Adam':\n self.optimizer = optim.Adam(\n self.model.parameters(),\n lr=self.arg.base_lr,\n weight_decay=self.arg.weight_decay)\n else:\n raise ValueError()\n\n def adjust_lr(self):\n if self.arg.optimizer == 'SGD' and self.arg.step:\n lr = self.arg.base_lr * (\n 0.1**np.sum(self.meta_info['epoch']>= np.array(self.arg.step)))\n for param_group in self.optimizer.param_groups:\n param_group['lr'] = lr\n self.lr = lr\n else:\n self.lr = self.arg.base_lr\n\n def show_topk(self, k):\n rank = self.result.argsort()\n hit_top_k = [l in rank[i, -k:] for i, l in enumerate(self.label)]\n accuracy = sum(hit_top_k) * 1.0 / len(hit_top_k)\n self.epoch_info['top{}'.format(k)]=100*accuracy\n self.io.print_log('\\tTop{}: {:.2f}%'.format(k, 100 * accuracy))\n\n def show_topk_by_category(self,top_k):\n instance_num, class_num = self.result.shape\n rank = self.result.argsort()\n hit_top_k = [[] for i in range(class_num)]\n for i in range(instance_num):\n l = self.label[i]\n hit_top_k[l].append(l in rank[i, -top_k:])\n\n accuracy_list = []\n for hit_per_category in hit_top_k:\n if hit_per_category:\n accuracy_list.append(sum(hit_per_category) * 1.0 / len(hit_per_category))\n else:\n accuracy_list.append(0.0)\n log='\\tTOP{} By Category:'.format(top_k) + '\\t'.join(['{:.2f}'.format(x * 100) for x in accuracy_list])\n self.io.print_log(log)\n def show_confusion_matrix(self):\n instance_num, class_num = self.result.shape\n matrix = np.zeros((class_num,class_num))\n rank = self.result.argsort()\n for i in range(instance_num):\n t = self.label[i] # true label\n p = rank[i,-1] # predict label\n matrix[t,p]+=1\n matrix_log = ''\n for row in matrix:\n matrix_log += ' '.join(['%3d'%(num) for num in row]) + '\\n'\n matrix_log = 'Confusion Matrix is:\\n' + matrix_log\n self.io.print_log(matrix_log)\n def train(self):\n self.model.train()\n self.adjust_lr()\n loader = self.data_loader['train']\n loss_value = []\n\n for data, label in loader:\n\n # get data\n data = data.float().to(self.dev)\n label = label.long().to(self.dev)\n\n # forward\n output = 
self.model(data)\n loss = self.loss(output, label)\n\n # backward\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # statistics\n self.iter_info['loss'] = loss.data.item()\n self.iter_info['lr'] = '{:.6f}'.format(self.lr)\n loss_value.append(self.iter_info['loss'])\n self.show_iter_info()\n self.meta_info['iter'] += 1\n\n self.epoch_info['mean_loss']= np.mean(loss_value)\n self.show_epoch_info()\n self.io.print_timer()\n\n def test(self, evaluation=True):\n\n self.model.eval()\n loader = self.data_loader['test']\n loss_value = []\n result_frag = []\n label_frag = []\n\n for data, label in loader:\n \n # get data\n data = data.float().to(self.dev)\n label = label.long().to(self.dev)\n\n # inference\n with torch.no_grad():\n output = self.model(data)\n result_frag.append(output.data.cpu().numpy())\n\n # get loss\n if evaluation:\n loss = self.loss(output, label)\n loss_value.append(loss.item())\n label_frag.append(label.data.cpu().numpy())\n\n self.result = np.concatenate(result_frag)\n if evaluation:\n self.label = np.concatenate(label_frag)\n self.epoch_info['mean_loss']= np.mean(loss_value)\n self.show_epoch_info()\n\n # show top-k accuracy\n for k in self.arg.show_topk:\n self.show_topk(k)\n self.show_topk_by_category(k)\n self.show_confusion_matrix()\n\n\n @staticmethod\n def get_parser(add_help=False):\n\n # parameter priority: command line > config > default\n parent_parser = Processor.get_parser(add_help=False)\n parser = argparse.ArgumentParser(\n add_help=add_help,\n parents=[parent_parser],\n description='Spatial Temporal Graph Convolution Network')\n\n # region arguments yapf: disable\n # evaluation\n parser.add_argument('--show_topk', type=int, default=[1, 5], nargs='+', help='which Top K accuracy will be shown')\n # optim\n parser.add_argument('--base_lr', type=float, default=0.01, help='initial learning rate')\n parser.add_argument('--step', type=int, default=[], nargs='+', help='the epoch where optimizer reduce the learning rate')\n parser.add_argument('--optimizer', default='SGD', help='type of optimizer')\n parser.add_argument('--nesterov', type=str2bool, default=True, help='use nesterov or not')\n parser.add_argument('--weight_decay', type=float, default=0.0001, help='weight decay for optimizer')\n # endregion yapf: enable\n\n return parser\n"
] |
[
[
"torch.nn.CrossEntropyLoss",
"numpy.concatenate",
"torch.no_grad",
"numpy.mean",
"numpy.array",
"numpy.zeros"
]
] |
SomanathanSubramaniyan/Applied-Databases
|
[
"93e278f0624e715a327ea46d81d14c46cc82cf85"
] |
[
"G00364742.py"
] |
[
"# Applied Database\r\n# Final Project\r\n# Section 4.4 - Python program answers\r\n# Author : Somu\r\n\r\n#mySQL modules import\r\nimport mysql.connector\r\nfrom mysql.connector import Error\r\nfrom mysql.connector import errorcode\r\nimport pandas as pd\r\n#Mongo modules import\r\nimport pymongo\r\nfrom pymongo import MongoClient\r\n#Pandas printing module\r\nfrom tabulate import tabulate\r\n\r\n# This function will display a Menu as requested in the project specification\r\ndef menu():\r\n print(\"--------\")\r\n print(\"World DB\")\r\n print(\"--------\")\r\n print(\"Menu\")\r\n print(\"====\")\r\n print(\"1 - View 15 Cities\")\r\n print(\"2 - View Cities by population\")\r\n print(\"3 - Add New City\")\r\n print(\"4 - Find Car by Engine Size\")\r\n print(\"5 - Add New Car\")\r\n print(\"6 - View Countries by name\")\r\n print(\"7 - View Countries by population\")\r\n print(\"x - Exit application\")\r\n\r\nmyclient = None\r\nglobal dfp, df\r\ndfp =\"\"\r\ndf = pd.DataFrame()\r\n\r\ndef Mongoconnect(csize,choice,id,reg,size):\r\n try:\r\n global myclient\r\n myclient =pymongo.MongoClient(host = \"localhost\",port=27017)\r\n myclient.admin.command('ismaster')\r\n mydb = myclient['docs']\r\n docs = mydb[\"docs\"]\r\n if choice == \"4\":\r\n query = {\"car.engineSize\":float(csize)}\r\n car = docs.find(query)\r\n for p in car:\r\n print ('{0} | {1} | {2} '.format(p[\"_id\"],p[\"car\"],p[\"addresses\"]))\r\n if choice == \"5\":\r\n query={\"_id\":int(id), \"car\": { \"reg\":reg,\"engineSize\":float(size)}}\r\n x = docs.insert_one(query)\r\n query = {\"_id\":int(id)}\r\n car = docs.find(query)\r\n for p in car:\r\n print (p)\r\n except :\r\n print (\"******Error Occurred while executing Mongo commands******\")\r\n \r\ndef globalSet ():\r\n global dfp\r\n dfp = \"2\"\r\n\r\ndef DBconnection(query,choice,code,param1):\r\n try:\r\n connection = mysql.connector.connect(host='localhost',database='world', user='root', password='Somu@1975')\r\n cursor = connection.cursor(prepared=True)\r\n global dfp,df\r\n if (choice == \"6\" or choice == \"7\") and dfp != \"2\" :\r\n df = pd.read_sql_query(query, connection)\r\n globalSet()\r\n\r\n if choice == \"1\" :\r\n cursor.execute(query) \r\n names = list(map(lambda x: x[0], cursor.description))\r\n print(\"----------------------------------------------------------------------------------\")\r\n print(\"{:5} | {:^20} | {:^12} | {:^20} | {:10}\".format(names[0],names[1],names[2],names[3],names[4]))\r\n print(\"----------------------------------------------------------------------------------\")\r\n for (id,name, countrycode, district,population, latitue,longitude) in cursor:\r\n print(\"{:5} | {:^20} | {:^12} | {:^20} | {:d}\".format(id,name, countrycode, district,population))\r\n elif choice == \"2\" :\r\n cursor.execute(query) \r\n names = list(map(lambda x: x[0], cursor.description))\r\n print(\"----------------------------------------------------------------------------------\")\r\n print(\"{:5} | {:^20} | {:^12} | {:^20} | {:10}\".format(names[0],names[1],names[2],names[3],names[4]))\r\n print(\"----------------------------------------------------------------------------------\")\r\n for (id,name, countrycode, district,population, latitue,longitude) in cursor:\r\n print(\"{:5} | {:^20} | {:^12} | {:^20} | {:d}\".format(id,name, countrycode, district,population))\r\n elif choice == \"3\":\r\n cursor.execute(query) \r\n connection.commit\r\n print(\"**** RESULT ***** The new city record is inserted into the table\")\r\n elif choice == \"6\" :\r\n df1 = 
df[df[\"Name\"].str.contains(code)].loc[:,[\"Name\",\"Continent\",\"population\",\"HeadofState\"]]\r\n #print tabulate(df1.to_string(index=False))\r\n print(tabulate(df1, headers=\"keys\",tablefmt=\"orgtbl\"))\r\n elif choice == \"7\":\r\n if param1 == \">\":\r\n df1 = df[(df[\"population\"] > int(code)) ].loc[:,[\"Name\",\"Continent\",\"population\",\"HeadofState\"]]\r\n elif param1 == \"<\":\r\n df1 = df[(df[\"population\"] < int(code)) ].loc[:,[\"Name\",\"Continent\",\"population\",\"HeadofState\"]]\r\n elif param1 == \"=\":\r\n df1 = df[(df[\"population\"] == int(code)) ].loc[:,[\"Name\",\"Continent\",\"population\",\"HeadofState\"]]\r\n print(tabulate(df1, headers=\"keys\",tablefmt=\"orgtbl\"))\r\n\r\n except mysql.connector.Error as error :\r\n if error.errno == errorcode.ER_ACCESS_DENIED_ERROR:\r\n print(\"Something is wrong with your user name or password\")\r\n elif error.errno == errorcode.ER_BAD_DB_ERROR:\r\n print(\"Database does not exist\")\r\n elif error.errno == 1452:\r\n print(\"----------------------------------------------------\")\r\n print(\"***ERROR***: Country Code \"+ code + \" does not exist\")\r\n print(\"----------------------------------------------------\")\r\n\r\n else:\r\n print(\"Failed to connect to the database: {}\".format(error))\r\n connection.rollback()\r\n\r\n finally:\r\n #closing database connection.\r\n if(connection.is_connected()):\r\n connection.close()\r\n\r\ndef displaymenu():\r\n print(\"This is not a valid choice. You can only choose from the above options\")\r\n input(\"\\nPress enter to continue...\")\r\n\r\ndef main():\r\n while True:\r\n menu()\r\n choice = input(\"Choice : --> \")\r\n Code,param1 = \"\",\"\"\r\n if choice == \"x\":\r\n print(\"Bye - Program Terminate now and welcome back anytime!\")\r\n return\r\n elif choice == \"1\":\r\n query= \"select * from city limit 15\"\r\n DBconnection (query, choice,Code,param1)\r\n elif choice == \"2\":\r\n print(\"Cities by Population\")\r\n print(\"--------------------\")\r\n \r\n while True:\r\n Comparison = input(\"Enter <, > or = :\")\r\n if Comparison == \"<\" or Comparison == \">\" or Comparison == \"=\":\r\n query = \"select * from city where population\" + Comparison\r\n break\r\n else:\r\n displaymenu()\r\n \r\n while True:\r\n Value= input(\"Enter Population :\")\r\n if Value.isdigit() == True:\r\n query = query + str(Value)\r\n break\r\n else:\r\n displaymenu()\r\n DBconnection (query, choice,Code,param1)\r\n elif choice == \"3\":\r\n print(\"Add New City\")\r\n print(\"------------\")\r\n City= input(\"Enter City Name :\")\r\n Code= input(\"Country Code :\")\r\n district= input(\"District :\")\r\n pop= input(\"Population :\")\r\n query = \"Insert INTO city (name, countrycode,district,population) VALUES ('\" + City + \"','\" + Code + \"','\" + district + \"',\"+ str(pop)+\")\"\r\n DBconnection (query, choice, Code,param1)\r\n elif choice == \"6\":\r\n print(\"Countries by Name\")\r\n print(\"-----------------\")\r\n Ctyname = input(\"Enter Country Name :\")\r\n query = \"select code, Name, Continent,population,HeadofState from country\" \r\n Code=Ctyname\r\n DBconnection (query, choice, Code,param1)\r\n elif choice == \"7\":\r\n print(\"Countries by Population\")\r\n print(\"-----------------------\")\r\n query = \"select code, Name, Continent,population,HeadofState from country\" \r\n while True:\r\n Comparison = input(\"Enter <, > or = :\")\r\n if Comparison == \"<\" or Comparison == \">\" or Comparison == \"=\":\r\n param1=Comparison\r\n break\r\n else:\r\n displaymenu()\r\n 
while True:\r\n Value= input(\"Enter Population :\")\r\n if Value.isdigit() == True:\r\n Code = Value\r\n break\r\n else:\r\n displaymenu()\r\n DBconnection (query, choice, Code,param1)\r\n elif choice == \"4\":\r\n print(\"show cars by engine size\")\r\n print(\"------------------------\")\r\n while True:\r\n csize = input(\"Enter Car Engine Size :\")\r\n if csize.isdigit() == True:\r\n csize = csize\r\n break\r\n else:\r\n displaymenu()\r\n Mongoconnect(csize,choice,\"\",\"\",\"\")\r\n elif choice == \"5\":\r\n print(\"Add New Car\")\r\n print(\"-----------\")\r\n id= input(\"_ids:\")\r\n reg= input(\"Enter reg :\")\r\n size= input(\"Enter Size :\")\r\n Mongoconnect(\"\",choice,id,reg,size)\r\n \r\n\r\n else:\r\n print(\"That is not a valid choice. You can only choose from the menu.\")\r\n input(\"\\nPress enter to continue...\")\r\n\r\nif __name__ == \"__main__\":\r\n main()"
] |
[
[
"pandas.read_sql_query",
"pandas.DataFrame"
]
] |
nickwinters1/wintersdata
|
[
"b85f9c1dc2d2852746d01f386748e99abac0f727"
] |
[
"functions.py"
] |
[
"\n#!/usr/bin/env python\n\"\"\"\nThis file contains helper classes for working with DataFrames\n\"\"\"\nimport numpy as np\nimport pandas as pd\n\nclass Cleaner:\n \"\"\"\n The Cleaner Class is meant to provide a quick self cleaning of\n Python DataFrames\n \"\"\"\n df = pd.DataFrame()\n\n def __init__(self, df):\n \"\"\"\n Initialization of the Class, where it takes a DataFrame\n \"\"\"\n self.df=df\n\n def dropna(self):\n \"\"\"\n Drop NA values\n \"\"\"\n self.df.dropna()\n\n def retdf(self):\n \"\"\"\n Return the DF\n \"\"\"\n return self.df\n\n def desc(self):\n \"\"\"\n Describe the DF\n \"\"\"\n return self.df.describe()\n"
] |
[
[
"pandas.DataFrame"
]
] |
foamliu/Image-Captioning
|
[
"972a3cfb56d0b43dde4ed3752992eb99489be53d"
] |
[
"model.py"
] |
[
"import keras.backend as K\nimport tensorflow as tf\nfrom keras.layers import Input, Dense, CuDNNLSTM, Concatenate, Embedding, RepeatVector, TimeDistributed, Dropout\nfrom keras.models import Model\nfrom keras.utils import plot_model\n\nfrom config import max_token_length\nfrom config import vocab_size, embedding_size\n\n\ndef build_model():\n # word embedding\n text_input = Input(shape=(max_token_length,), dtype='int32')\n x = Embedding(input_dim=vocab_size, output_dim=embedding_size)(text_input)\n x = CuDNNLSTM(256, return_sequences=True)(x)\n text_embedding = TimeDistributed(Dense(embedding_size))(x)\n\n # image embedding\n image_input = Input(shape=(2048,))\n x = Dense(embedding_size, activation='relu', name='image_embedding')(image_input)\n # the image I is only input once\n image_embedding = RepeatVector(1)(x)\n\n # language model\n x = [image_embedding, text_embedding]\n x = Concatenate(axis=1)(x)\n x = Dropout(0.1)(x)\n x = CuDNNLSTM(1024, return_sequences=True, name='language_lstm_1')(x)\n x = Dropout(0.2)(x)\n x = CuDNNLSTM(1024, name='language_lstm_2')(x)\n x = Dropout(0.4)(x)\n output = Dense(vocab_size, activation='softmax', name='output')(x)\n\n inputs = [image_input, text_input]\n model = Model(inputs=inputs, outputs=output)\n return model\n\n\nif __name__ == '__main__':\n with tf.device(\"/cpu:0\"):\n model = build_model()\n print(model.summary())\n plot_model(model, to_file='model.svg', show_layer_names=True, show_shapes=True)\n\n K.clear_session()\n"
] |
[
[
"tensorflow.device"
]
] |
ValIlya/gopro-webcam
|
[
"3591cf918e535485d14bed96d147d36268289fde"
] |
[
"src/motion_detection.py"
] |
[
"from typing import Iterable, Dict\n\nimport cv2\nimport imutils\nimport numpy as np\n\n\nclass MotionDetector:\n MAX_FRAMES = 5 # averaging n last frames to get stable reference\n GAUSS_KERNEL_SIZE = 21 # blurring\n DELTA_THRESHOLD = 20 # seeking pixels with changed brightness more than DELTA_THRESHOLD\n MIN_AREA = 200 # clusters with area more than MIN_AREA are considered\n\n def __init__(self):\n self.stream = None\n self.frames = []\n\n def process_frame(self, frame: np.ndarray):\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n gray = cv2.GaussianBlur(gray, (self.GAUSS_KERNEL_SIZE, self.GAUSS_KERNEL_SIZE), 0)\n return gray\n\n def update_frame_buffer(self, gray):\n if len(self.frames) >= self.MAX_FRAMES:\n self.frames.pop(0)\n self.frames.append(gray)\n\n def get_motion_contours(self, frame) -> tuple:\n gray = self.process_frame(frame)\n contours = tuple()\n if len(self.frames) >= self.MAX_FRAMES:\n avg_frames = np.mean(self.frames, axis=0).astype(gray.dtype)\n frame_delta = cv2.absdiff(avg_frames, gray)\n _, thresh = cv2.threshold(src=frame_delta, thresh=self.DELTA_THRESHOLD, maxval=255, type=cv2.THRESH_BINARY)\n thresh = cv2.dilate(thresh, None, iterations=2)\n contours = cv2.findContours(\n thresh.copy(), cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE\n )\n contours = imutils.grab_contours(contours)\n contours = tuple([\n contour\n for contour in contours\n if cv2.contourArea(contour) >= self.MIN_AREA\n ])\n\n self.update_frame_buffer(gray)\n\n return contours\n\n def draw_contours(self, frame: np.ndarray, contours: tuple) -> np.ndarray:\n for contour in contours:\n (x, y, w, h) = cv2.boundingRect(contour)\n cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)\n cv2.drawContours(frame, contour, -1, (0, 0, 255), 3)\n return frame\n\n def start(self, stream: Iterable[np.ndarray]) -> Iterable[Dict[str, np.ndarray]]:\n\n for stream_info in stream:\n frame = stream_info['frame'].copy()\n contours = self.get_motion_contours(frame)\n frame = self.draw_contours(frame, contours)\n\n yield {\n **stream_info,\n 'contours': contours,\n 'frame_with_contours': frame,\n }\n"
] |
[
[
"numpy.mean"
]
] |
OPHoperHPO/dcgan-lentach-logo-generator
|
[
"9ce626866fa28f1f4dc368f4a90909d0981c8f67"
] |
[
"generate_single_image.py"
] |
[
"# Lentach logo generator - DCGAN Keras.\n# Modified by Anodev Development.\n\n# Imports\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom keras.models import load_model\n\n\ndef generate_single_image(model_path, image_save_path):\n # Save 1 generated image for demonstration purposes using matplotlib.pyplot.\n random_noise_dimension = 100\n noise = np.random.normal(0, 1, (1, random_noise_dimension))\n\n # Load model\n generator = load_model(model_path)\n\n # Predict image\n generated_images = generator.predict(noise)\n generated_images = 0.5 * generated_images + 0.5\n\n\t# Create an image using matplotlib.\n plt.imshow(generated_images[0, :], cmap='spring')\n plt.axis('off')\n plt.savefig(image_save_path)\n plt.close()\n\n\nif __name__ == '__main__':\n\t# Image generation using pre-trained model\n generate_single_image(\"pretrained_models/lentach_generator_2200.h5\", \"output_by_pretrained_model.jpg\")\n # Generate image\n generate_single_image(\"saved_models/lentach_generator.h5\", \"output.jpg\")\n"
] |
[
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.savefig",
"numpy.random.normal",
"matplotlib.pyplot.close",
"matplotlib.pyplot.axis"
]
] |
multiparty/datathon
|
[
"dcb014b7de84e1ff559acd045492762dd1b73ff6"
] |
[
"parsing_scripts/parse.py"
] |
[
"import pandas as pd\nimport glob\n\ndef combine(path_to_files, primary_key, output_path):\n li = []\n\n for filename in glob.glob(path_to_files):\n print(filename)\n df = pd.read_csv(filename, index_col=False)\n if filename == \"../synthetic_data/trial_2\\syn_adef.csv\":\n print(len(df))\n df = df[df[\"PARAMCD\"] == \"TIME2DTH\"]\n print(len(df))\n sub_li = []\n for col_name in list(df):\n sub_df = df.groupby([primary_key])[col_name].apply(list)\n try:\n sub_df[primary_key] = df[primary_key].apply(lambda x: x[0])\n except:\n pass\n sub_li.append(sub_df)\n df = pd.concat(sub_li, axis=1)\n li.append(df)\n\n frame = pd.concat(li, axis=1)\n del frame[primary_key]\n frame = frame.dropna()\n frame.to_csv(output_path)\n\nif __name__ == \"__main__\":\n # path_to_files = \"../synthetic_data/trial_1/*.csv\"\n # primary_key = \"MASK_ID\"\n\n path_to_files = \"../synthetic_data/trial_2/*.csv\"\n primary_key = \"SUBJID\"\n output_path = \"../parsed_data/combined_trial_2.csv\"\n\n filter_df = combine(path_to_files=path_to_files, primary_key=primary_key, output_path=output_path)"
] |
[
[
"pandas.concat",
"pandas.read_csv"
]
] |
tinkuy-adm/tinkuy-clustering
|
[
"4232f0e19963645127f74aee0e90f8f965590a9d"
] |
[
"src/outlier_detection.py"
] |
[
"from sklearn.cluster import DBSCAN\nimport numpy as np\n\n#CONSTANTES\n#METERS = 80\n#MIN_SAMPLES = 10\n\n#Se recibe una lista de listas ([longitud,latitud])\n#Se devuelven los labels de los puntos (los que tengan -1 son outliers)\ndef eliminate_outliers(points,meters,minsam):\n array = np.array(points)\n lat_distance = (meters/1000)/111.32\n clustering = DBSCAN(eps=lat_distance, min_samples=minsam).fit(array)\n return clustering.labels_\n"
] |
[
[
"numpy.array",
"sklearn.cluster.DBSCAN"
]
] |
Mellechowicz/PoVaB
|
[
"5311f668a71d75217dfc8e95ab7575fd0df49ad6"
] |
[
"PoVaB/__main__.py"
] |
[
"#!/usr/bin/python3\n\nimport numpy as np\nimport defusedxml.ElementTree as ET\nfrom sys import argv\nfrom PoVaB.argv import Options\nfrom PoVaB.tags import Tags\n\nif __name__ == '__main__':\n opt = Options(*argv)\n root = ET.parse(opt('vasprun')).getroot()\n\n tags = Tags(opt('vasprun'))\n\n filename = opt('outfile')\n if filename[-4:] == '.png':\n filename = filename[:-4]\n o = opt('orbitals')\n ions=[ int(a) for a in opt('ions').split(',')]\n efermi = opt('efermi')\n eLimit = [ np.float(a) for a in opt('range').split(',')]\n labels = opt('labels')\n dpi = opt('dpi')\n\n #\n # mmmmm # # #\n # # \"# mmm mmm mmm# #mmm mmm m mm mmm# mmm\n # #mmmm\" #\" # \" # #\" \"# #\" \"# \" # #\" # #\" \"# # \"\n # # \"m #\"\"\"\" m\"\"\"# # # # # m\"\"\"# # # # # \"\"\"m\n # # \" \"#mm\" \"mm\"# \"#m## ##m#\" \"mm\"# # # \"#m## \"mmm\"\n\n eigenvalues = {\n 'dimensions': {},\n 'fields' : {},\n 'spagetti' : [[],[]]\n }\n\n data = root.find('calculation').find('projected').find('eigenvalues')[0]\n for d in data.findall('dimension'):\n eigenvalues['dimensions'][int(d.attrib['dim'])-1]= d.text\n # indexing from 0\n for i,f in enumerate(data.findall('field')):\n eigenvalues['fields'][i]= f.text\n array = data.find('set')\n for (s,spin) in enumerate(array):\n for kpoint in spin:\n local = []\n for entry in kpoint:\n local.append(np.fromstring(entry.text,sep=' '))\n eigenvalues['spagetti'][s].append(np.array(local))\n del data,array\n\n projections = {\n 'dimensions': {},\n 'fields' : {},\n 'spagetti' : [[],[],[],[]]\n }\n data = root.find('calculation').find('projected').find('array')\n for d in data.findall('dimension'):\n eigenvalues['dimensions'][int(d.attrib['dim'])-1]= d.text\n # indexing from 0\n for i,f in enumerate(data.findall('field')):\n eigenvalues['fields'][i]= f.text\n array = data.find('set')\n for (s,spin) in enumerate(array):\n for kpoint in spin:\n projections['spagetti'][s].append([])\n for band in kpoint:\n local = []\n for ion in band:\n local.append(np.fromstring(ion.text,sep=' '))\n projections['spagetti'][s][-1].append(np.array(local))\n del data,array\n\n data = root.find('kpoints').find('generation')\n TOTAL_KPOINTS = int(data.find('i').text)\n high_symmetry_path = []\n for v in data.findall('v'):\n high_symmetry_path.append(np.fromstring(v.text,sep=' '))\n del data\n\n mul = np.zeros(len(high_symmetry_path)-1)\n for i,_ in enumerate(mul):\n mul[i] = np.linalg.norm(high_symmetry_path[i]-high_symmetry_path[i+1])\n mul /= np.max(mul)\n\n data = root.find('kpoints').find('varray')\n kpoints = []\n for point in data:\n kpoints.append(np.fromstring(point.text,sep=' '))\n del data\n\n atomNames = []\n data = root.find('atominfo').find('array').find('set')\n for rc in data:\n atomNames.append(rc[0].text)\n del data\n\n from itertools import cycle\n import matplotlib.pyplot as plt\n from matplotlib import rcParams\n rcParams.update({'figure.autolayout': True,\n 'text.usetex': True})\n\n # Data for plotting\n\n orbitals = {\n 'sp': [0,1,2,3],\n 'spd': [0,1,2,3,4,5,6,7,8],\n 'spdf': [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],\n 'pd': [1,2,3,4,5,6,7,8],\n 'pdf': [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],\n 'df': [4,5,6,7,8,9,10,11,12,13,14,15],\n 's': [0],\n 'p': [1,2,3],\n 'd': [4,5,6,7,8],\n 'f': [9,10,11,12,13,14,15]\n }\n\n atom_types = ''.join(['_%s%d%s'%(n,i+1,o) for i,n in zip(ions,[atomNames[ion] for ion in ions])])\n fig, ax = plt.subplots()\n data = []\n try:\n colors = cycle(['k','r']) if tags['ISPIN'] == 2 else cycle['k']\n except AttributeError:\n colors = cycle['k']\n for i,kpoint in 
enumerate(projections['spagetti'][0]):\n data.append([])\n for (b,band),color in zip(enumerate(kpoint),colors):\n data[i].append(None)\n if np.all(eigenvalues['spagetti'][0][i][b][0] < efermi + eLimit[0]) or np.all(eigenvalues['spagetti'][0][i][b][0] > efermi + eLimit[1]):\n continue\n occ = 0.0\n for ion in ions:\n occ += np.sum(band[ion,orbitals[o]])\n if occ < 1e-4:\n continue\n ax.scatter(TOTAL_KPOINTS*np.sum(mul[:i//TOTAL_KPOINTS])+(i%TOTAL_KPOINTS)*mul[i//TOTAL_KPOINTS], eigenvalues['spagetti'][0][i][b][0]-efermi,s=10*np.power(2*occ,2), c=color, alpha=0.9)\n data[i][b] = (i,*kpoints[i],eigenvalues['spagetti'][0][i][b][0]-efermi,occ)\n for i,kpoint in enumerate(data):\n for b,bnd in enumerate(kpoint):\n if bnd:\n with open('data%s_%04d.txt'%(atom_types,b),'a') as out:\n out.write(\"%3d % .8f % .8f % .8f % .8f %.8f\\n\"%bnd)\n xticks = []\n xlabels = []\n for i,a in enumerate(labels):\n xticks.append(TOTAL_KPOINTS*np.sum(mul[:i]))\n xlabels.append(a)\n ax.set_xticks(xticks)\n ax.set_xticklabels(xlabels)\n\n ax.set(xlabel='', ylabel=r'band energy, $E_b - E_f$ (eV)',\n title=r'Band structure projected on %s ions of \\textsuperscript{%s} electrons'%(r''.join([r'%s\\textsubscript{(%d)}'%(n,i+1) for i,n in zip(ions,[atomNames[ion] for ion in ions])]), o))\n ax.grid()\n\n fig.savefig(filename+'.png',dpi=dpi)\n\n exit()\n"
] |
[
[
"numpy.power",
"matplotlib.pyplot.subplots",
"numpy.linalg.norm",
"numpy.all",
"numpy.max",
"numpy.fromstring",
"matplotlib.rcParams.update",
"numpy.array",
"numpy.sum",
"numpy.float"
]
] |
rahul-art/Faulti_waffer
|
[
"3dc1f2a7a990c1f7d4c15c0bf33273b7a38e79e1"
] |
[
"Prediction_Raw_Data_Validation/predictionDataValidation.py"
] |
[
"import sqlite3\r\nfrom datetime import datetime\r\nfrom os import listdir\r\nimport os\r\nimport re\r\nimport json\r\nimport shutil\r\nimport pandas as pd\r\nfrom application_logging.logger import App_Logger\r\n\r\n\r\n\r\n\r\n\r\nclass Prediction_Data_validation:\r\n \"\"\"\r\n This class shall be used for handling all the validation done on the Raw Prediction Data!!.\r\n\r\n Written By: iNeuron Intelligence\r\n Version: 1.0\r\n Revisions: None\r\n\r\n \"\"\"\r\n\r\n def __init__(self,path):\r\n self.Batch_Directory = path\r\n self.schema_path = 'schema_prediction.json'\r\n self.logger = App_Logger()\r\n\r\n\r\n def valuesFromSchema(self):\r\n \"\"\"\r\n Method Name: valuesFromSchema\r\n Description: This method extracts all the relevant information from the pre-defined \"Schema\" file.\r\n Output: LengthOfDateStampInFile, LengthOfTimeStampInFile, column_names, Number of Columns\r\n On Failure: Raise ValueError,KeyError,Exception\r\n\r\n Written By: iNeuron Intelligence\r\n Version: 1.0\r\n Revisions: None\r\n\r\n \"\"\"\r\n try:\r\n with open(self.schema_path, 'r') as f:\r\n dic = json.load(f)\r\n f.close()\r\n pattern = dic['SampleFileName']\r\n LengthOfDateStampInFile = dic['LengthOfDateStampInFile']\r\n LengthOfTimeStampInFile = dic['LengthOfTimeStampInFile']\r\n column_names = dic['ColName']\r\n NumberofColumns = dic['NumberofColumns']\r\n\r\n file = open(\"Training_Logs/valuesfromSchemaValidationLog.txt\", 'a+')\r\n message =\"LengthOfDateStampInFile:: %s\" %LengthOfDateStampInFile + \"\\t\" + \"LengthOfTimeStampInFile:: %s\" % LengthOfTimeStampInFile +\"\\t \" + \"NumberofColumns:: %s\" % NumberofColumns + \"\\n\"\r\n self.logger.log(file,message)\r\n\r\n file.close()\r\n\r\n\r\n\r\n except ValueError:\r\n file = open(\"Prediction_Logs/valuesfromSchemaValidationLog.txt\", 'a+')\r\n self.logger.log(file,\"ValueError:Value not found inside schema_training.json\")\r\n file.close()\r\n raise ValueError\r\n\r\n except KeyError:\r\n file = open(\"Prediction_Logs/valuesfromSchemaValidationLog.txt\", 'a+')\r\n self.logger.log(file, \"KeyError:Key value error incorrect key passed\")\r\n file.close()\r\n raise KeyError\r\n\r\n except Exception as e:\r\n file = open(\"Prediction_Logs/valuesfromSchemaValidationLog.txt\", 'a+')\r\n self.logger.log(file, str(e))\r\n file.close()\r\n raise e\r\n\r\n return LengthOfDateStampInFile, LengthOfTimeStampInFile, column_names, NumberofColumns\r\n\r\n\r\n def manualRegexCreation(self):\r\n\r\n \"\"\"\r\n Method Name: manualRegexCreation\r\n Description: This method contains a manually defined regex based on the \"FileName\" given in \"Schema\" file.\r\n This Regex is used to validate the filename of the prediction data.\r\n Output: Regex pattern\r\n On Failure: None\r\n\r\n Written By: iNeuron Intelligence\r\n Version: 1.0\r\n Revisions: None\r\n\r\n \"\"\"\r\n regex = \"['wafer']+['\\_'']+[\\d_]+[\\d]+\\.csv\"\r\n return regex\r\n\r\n def createDirectoryForGoodBadRawData(self):\r\n\r\n \"\"\"\r\n Method Name: createDirectoryForGoodBadRawData\r\n Description: This method creates directories to store the Good Data and Bad Data\r\n after validating the prediction data.\r\n\r\n Output: None\r\n On Failure: OSError\r\n\r\n Written By: iNeuron Intelligence\r\n Version: 1.0\r\n Revisions: None\r\n\r\n \"\"\"\r\n try:\r\n path = os.path.join(\"Prediction_Raw_Files_Validated/\", \"Good_Raw/\")\r\n if not os.path.isdir(path):\r\n os.makedirs(path)\r\n path = os.path.join(\"Prediction_Raw_Files_Validated/\", \"Bad_Raw/\")\r\n if not os.path.isdir(path):\r\n 
os.makedirs(path)\r\n\r\n except OSError as ex:\r\n file = open(\"Prediction_Logs/GeneralLog.txt\", 'a+')\r\n self.logger.log(file,\"Error while creating Directory %s:\" % ex)\r\n file.close()\r\n raise OSError\r\n\r\n def deleteExistingGoodDataTrainingFolder(self):\r\n \"\"\"\r\n Method Name: deleteExistingGoodDataTrainingFolder\r\n Description: This method deletes the directory made to store the Good Data\r\n after loading the data in the table. Once the good files are\r\n loaded in the DB,deleting the directory ensures space optimization.\r\n Output: None\r\n On Failure: OSError\r\n\r\n Written By: iNeuron Intelligence\r\n Version: 1.0\r\n Revisions: None\r\n\r\n \"\"\"\r\n try:\r\n path = 'Prediction_Raw_Files_Validated/'\r\n # if os.path.isdir(\"ids/\" + userName):\r\n # if os.path.isdir(path + 'Bad_Raw/'):\r\n # shutil.rmtree(path + 'Bad_Raw/')\r\n if os.path.isdir(path + 'Good_Raw/'):\r\n shutil.rmtree(path + 'Good_Raw/')\r\n file = open(\"Prediction_Logs/GeneralLog.txt\", 'a+')\r\n self.logger.log(file,\"GoodRaw directory deleted successfully!!!\")\r\n file.close()\r\n except OSError as s:\r\n file = open(\"Prediction_Logs/GeneralLog.txt\", 'a+')\r\n self.logger.log(file,\"Error while Deleting Directory : %s\" %s)\r\n file.close()\r\n raise OSError\r\n def deleteExistingBadDataTrainingFolder(self):\r\n\r\n \"\"\"\r\n Method Name: deleteExistingBadDataTrainingFolder\r\n Description: This method deletes the directory made to store the bad Data.\r\n Output: None\r\n On Failure: OSError\r\n\r\n Written By: iNeuron Intelligence\r\n Version: 1.0\r\n Revisions: None\r\n\r\n \"\"\"\r\n\r\n try:\r\n path = 'Prediction_Raw_Files_Validated/'\r\n if os.path.isdir(path + 'Bad_Raw/'):\r\n shutil.rmtree(path + 'Bad_Raw/')\r\n file = open(\"Prediction_Logs/GeneralLog.txt\", 'a+')\r\n self.logger.log(file,\"BadRaw directory deleted before starting validation!!!\")\r\n file.close()\r\n except OSError as s:\r\n file = open(\"Prediction_Logs/GeneralLog.txt\", 'a+')\r\n self.logger.log(file,\"Error while Deleting Directory : %s\" %s)\r\n file.close()\r\n raise OSError\r\n\r\n def moveBadFilesToArchiveBad(self):\r\n\r\n\r\n \"\"\"\r\n Method Name: moveBadFilesToArchiveBad\r\n Description: This method deletes the directory made to store the Bad Data\r\n after moving the data in an archive folder. 
We archive the bad\r\n files to send them back to the client for invalid data issue.\r\n Output: None\r\n On Failure: OSError\r\n\r\n Written By: iNeuron Intelligence\r\n Version: 1.0\r\n Revisions: None\r\n\r\n \"\"\"\r\n now = datetime.now()\r\n date = now.date()\r\n time = now.strftime(\"%H%M%S\")\r\n try:\r\n path= \"PredictionArchivedBadData\"\r\n if not os.path.isdir(path):\r\n os.makedirs(path)\r\n source = 'Prediction_Raw_Files_Validated/Bad_Raw/'\r\n dest = 'PredictionArchivedBadData/BadData_' + str(date)+\"_\"+str(time)\r\n if not os.path.isdir(dest):\r\n os.makedirs(dest)\r\n files = os.listdir(source)\r\n for f in files:\r\n if f not in os.listdir(dest):\r\n shutil.move(source + f, dest)\r\n file = open(\"Prediction_Logs/GeneralLog.txt\", 'a+')\r\n self.logger.log(file,\"Bad files moved to archive\")\r\n path = 'Prediction_Raw_Files_Validated/'\r\n if os.path.isdir(path + 'Bad_Raw/'):\r\n shutil.rmtree(path + 'Bad_Raw/')\r\n self.logger.log(file,\"Bad Raw Data Folder Deleted successfully!!\")\r\n file.close()\r\n except OSError as e:\r\n file = open(\"Prediction_Logs/GeneralLog.txt\", 'a+')\r\n self.logger.log(file, \"Error while moving bad files to archive:: %s\" % e)\r\n file.close()\r\n raise OSError\r\n\r\n\r\n\r\n\r\n def validationFileNameRaw(self,regex,LengthOfDateStampInFile,LengthOfTimeStampInFile):\r\n \"\"\"\r\n Method Name: validationFileNameRaw\r\n Description: This function validates the name of the prediction csv file as per given name in the schema!\r\n Regex pattern is used to do the validation.If name format do not match the file is moved\r\n to Bad Raw Data folder else in Good raw data.\r\n Output: None\r\n On Failure: Exception\r\n\r\n Written By: iNeuron Intelligence\r\n Version: 1.0\r\n Revisions: None\r\n\r\n \"\"\"\r\n # delete the directories for good and bad data in case last run was unsuccessful and folders were not deleted.\r\n self.deleteExistingBadDataTrainingFolder()\r\n self.deleteExistingGoodDataTrainingFolder()\r\n self.createDirectoryForGoodBadRawData()\r\n onlyfiles = [f for f in listdir(self.Batch_Directory)]\r\n try:\r\n f = open(\"Prediction_Logs/nameValidationLog.txt\", 'a+')\r\n for filename in onlyfiles:\r\n if (re.match(regex, filename)):\r\n splitAtDot = re.split('.csv', filename)\r\n splitAtDot = (re.split('_', splitAtDot[0]))\r\n if len(splitAtDot[1]) == LengthOfDateStampInFile:\r\n if len(splitAtDot[2]) == LengthOfTimeStampInFile:\r\n shutil.copy(\"Prediction_Batch_files/\" + filename, \"Prediction_Raw_Files_Validated/Good_Raw\")\r\n self.logger.log(f,\"Valid File name!! File moved to GoodRaw Folder :: %s\" % filename)\r\n\r\n else:\r\n shutil.copy(\"Prediction_Batch_files/\" + filename, \"Prediction_Raw_Files_Validated/Bad_Raw\")\r\n self.logger.log(f,\"Invalid File Name!! File moved to Bad Raw Folder :: %s\" % filename)\r\n else:\r\n shutil.copy(\"Prediction_Batch_files/\" + filename, \"Prediction_Raw_Files_Validated/Bad_Raw\")\r\n self.logger.log(f,\"Invalid File Name!! File moved to Bad Raw Folder :: %s\" % filename)\r\n else:\r\n shutil.copy(\"Prediction_Batch_files/\" + filename, \"Prediction_Raw_Files_Validated/Bad_Raw\")\r\n self.logger.log(f, \"Invalid File Name!! 
File moved to Bad Raw Folder :: %s\" % filename)\r\n\r\n f.close()\r\n\r\n except Exception as e:\r\n f = open(\"Prediction_Logs/nameValidationLog.txt\", 'a+')\r\n self.logger.log(f, \"Error occured while validating FileName %s\" % e)\r\n f.close()\r\n raise e\r\n\r\n\r\n\r\n\r\n def validateColumnLength(self,NumberofColumns):\r\n \"\"\"\r\n Method Name: validateColumnLength\r\n Description: This function validates the number of columns in the csv files.\r\n It is should be same as given in the schema file.\r\n If not same file is not suitable for processing and thus is moved to Bad Raw Data folder.\r\n If the column number matches, file is kept in Good Raw Data for processing.\r\n The csv file is missing the first column name, this function changes the missing name to \"Wafer\".\r\n Output: None\r\n On Failure: Exception\r\n\r\n Written By: iNeuron Intelligence\r\n Version: 1.0\r\n Revisions: None\r\n\r\n \"\"\"\r\n try:\r\n f = open(\"Prediction_Logs/columnValidationLog.txt\", 'a+')\r\n self.logger.log(f,\"Column Length Validation Started!!\")\r\n for file in listdir('Prediction_Raw_Files_Validated/Good_Raw/'):\r\n csv = pd.read_csv(\"Prediction_Raw_Files_Validated/Good_Raw/\" + file)\r\n if csv.shape[1] == NumberofColumns:\r\n csv.rename(columns={\"Unnamed: 0\": \"Wafer\"}, inplace=True)\r\n csv.to_csv(\"Prediction_Raw_Files_Validated/Good_Raw/\" + file, index=None, header=True)\r\n else:\r\n shutil.move(\"Prediction_Raw_Files_Validated/Good_Raw/\" + file, \"Prediction_Raw_Files_Validated/Bad_Raw\")\r\n self.logger.log(f, \"Invalid Column Length for the file!! File moved to Bad Raw Folder :: %s\" % file)\r\n\r\n self.logger.log(f, \"Column Length Validation Completed!!\")\r\n except OSError:\r\n f = open(\"Prediction_Logs/columnValidationLog.txt\", 'a+')\r\n self.logger.log(f, \"Error Occured while moving the file :: %s\" % OSError)\r\n f.close()\r\n raise OSError\r\n except Exception as e:\r\n f = open(\"Prediction_Logs/columnValidationLog.txt\", 'a+')\r\n self.logger.log(f, \"Error Occured:: %s\" % e)\r\n f.close()\r\n raise e\r\n\r\n f.close()\r\n\r\n def deletePredictionFile(self):\r\n\r\n if os.path.exists('Prediction_Output_File/Predictions.csv'):\r\n os.remove('Prediction_Output_File/Predictions.csv')\r\n\r\n def validateMissingValuesInWholeColumn(self):\r\n \"\"\"\r\n Method Name: validateMissingValuesInWholeColumn\r\n Description: This function validates if any column in the csv file has all values missing.\r\n If all the values are missing, the file is not suitable for processing.\r\n SUch files are moved to bad raw data.\r\n Output: None\r\n On Failure: Exception\r\n\r\n Written By: iNeuron Intelligence\r\n Version: 1.0\r\n Revisions: None\r\n\r\n \"\"\"\r\n try:\r\n f = open(\"Prediction_Logs/missingValuesInColumn.txt\", 'a+')\r\n self.logger.log(f, \"Missing Values Validation Started!!\")\r\n\r\n for file in listdir('Prediction_Raw_Files_Validated/Good_Raw/'):\r\n csv = pd.read_csv(\"Prediction_Raw_Files_Validated/Good_Raw/\" + file)\r\n count = 0\r\n for columns in csv:\r\n if (len(csv[columns]) - csv[columns].count()) == len(csv[columns]):\r\n count+=1\r\n shutil.move(\"Prediction_Raw_Files_Validated/Good_Raw/\" + file,\r\n \"Prediction_Raw_Files_Validated/Bad_Raw\")\r\n self.logger.log(f,\"Invalid Column Length for the file!! 
File moved to Bad Raw Folder :: %s\" % file)\r\n break\r\n if count==0:\r\n csv.rename(columns={\"Unnamed: 0\": \"Wafer\"}, inplace=True)\r\n csv.to_csv(\"Prediction_Raw_Files_Validated/Good_Raw/\" + file, index=None, header=True)\r\n except OSError:\r\n f = open(\"Prediction_Logs/missingValuesInColumn.txt\", 'a+')\r\n self.logger.log(f, \"Error Occured while moving the file :: %s\" % OSError)\r\n f.close()\r\n raise OSError\r\n except Exception as e:\r\n f = open(\"Prediction_Logs/missingValuesInColumn.txt\", 'a+')\r\n self.logger.log(f, \"Error Occured:: %s\" % e)\r\n f.close()\r\n raise e\r\n f.close()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n"
] |
[
[
"pandas.read_csv"
]
] |
datesann0109/D_2117
|
[
"07a94c65c622cf2aa9f2a852f1f28e647a5823bd"
] |
[
"ai/utils/model.py"
] |
[
"import torch.nn as nn\nimport torch\nfrom torchvision import models\nimport numpy as np\nimport cv2\n\n\nclass CustomModel(nn.Module):\n def __init__(self):\n super().__init__()\n\n self.segment_model = models.segmentation.fcn_resnet50(pretrained=True)\n sem_classes = [\n '__background__', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus',\n 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike',\n 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'\n ]\n self.sem_class_to_idx = {cls: idx for (\n idx, cls) in enumerate(sem_classes)}\n self.class_dim = 1\n\n base_model = models.vgg19(pretrained=True)\n self.features = base_model.features\n self.avgpool = base_model.avgpool\n self.linear = base_model.classifier[0]\n\n #self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n self.device = 'cpu'\n\n def _trimming_img(self, img, i):\n\n sum_x = np.sum(img, axis=1)\n sum_y = np.sum(img, axis=0)\n\n try:\n x_min = np.nonzero(sum_x)[0].min()\n x_max = np.nonzero(sum_x)[0].max()\n y_min = np.nonzero(sum_y)[0].min()\n y_max = np.nonzero(sum_y)[0].max()\n except ValueError: # 真っ黒な画像(0, 0, 0)\n print(f\"{i} : All Pixels are zeros.\")\n x_min = y_min = 0\n x_max = img.shape[0]\n y_max = img.shape[1]\n\n # 犬を切り取る\n cropped_img = img[x_min:x_max, y_min:y_max, :]\n\n # 余白を追加して正方形に変更\n cropped_img_h, cropped_img_w = cropped_img[:, :, 0].shape\n if cropped_img_h > cropped_img_w:\n add_pad = cropped_img_h - cropped_img_w\n top = bottom = 0\n left = add_pad // 2\n right = add_pad - left\n elif cropped_img_h < cropped_img_w:\n add_pad = cropped_img_w - cropped_img_h\n left = right = 0\n top = add_pad // 2\n bottom = add_pad - top\n else:\n left = right = top = bottom = 0\n square_img = cv2.copyMakeBorder(\n cropped_img, top, bottom, left, right, cv2.BORDER_CONSTANT, (0, 0, 0))\n square_img = cv2.resize(square_img, (512, 512))\n square_img = square_img.transpose(2, 0, 1)\n\n return square_img\n\n def forward(self, img):\n orig_img = img\n\n # Segmentation Cut\n img = img.to(self.device)\n masks_out = self.segment_model(img)['out']\n normalized_masks = torch.nn.functional.softmax(masks_out, dim=1)\n boolean_dog_masks = (normalized_masks.argmax(\n self.class_dim) == self.sem_class_to_idx['dog'])\n\n masks_np = boolean_dog_masks.float().to('cpu').detach().numpy().copy()\n m3_list = [\n np.array([masks_np[i], masks_np[i], masks_np[i]])\n for i in range(masks_np.shape[0])\n ]\n m3_np = np.array(m3_list)\n img_np = img.float().to('cpu').detach().numpy().copy()\n cut_img_np = img_np * m3_np\n\n cut_trimed_img_np_list = [self._trimming_img(\n cut_img_np[i].transpose(1, 2, 0), i=i) for i in range(cut_img_np.shape[0])]\n cut_img_np = np.array(cut_trimed_img_np_list)\n\n img = torch.from_numpy(cut_img_np.astype(np.float32)).clone()\n\n # BaseModel get vector\n img = img.to(self.device)\n out = self.features(img)\n out = self.avgpool(out)\n out = out.view(out.shape[0], -1)\n out = self.linear(out)\n return out\n"
] |
[
[
"torch.nn.functional.softmax",
"numpy.array",
"numpy.sum",
"numpy.nonzero"
]
] |
atulkum/sequence_prediction
|
[
"3fd4abe7dfcc6d3929edbe60058bd05dc67d0fe3"
] |
[
"neural_ner/model.py"
] |
[
"from __future__ import unicode_literals, print_function, division\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\nfrom data_utils.constant import Constants\nimport logging\n\nfrom crf import CRF_Loss\nfrom model_utils import init_lstm_wt, init_linear_wt, get_word_embd, get_mask\n\nlogging.basicConfig(level=logging.INFO)\n\nclass NER_SOFTMAX_CHAR(nn.Module):\n def __init__(self, vocab, config):\n super(NER_SOFTMAX_CHAR, self).__init__()\n word_emb_matrix = get_word_embd(vocab, config)\n embd_vector = torch.from_numpy(word_emb_matrix).float()\n\n self.word_embeds = nn.Embedding.from_pretrained(embd_vector, freeze=False)\n self.char_embeds = nn.Embedding(len(vocab.char_to_id), config.char_embd_dim, padding_idx=Constants.PAD_ID)\n if config.is_caps:\n self.caps_embeds = nn.Embedding(vocab.get_caps_cardinality(),\n config.caps_embd_dim, padding_idx=Constants.PAD_ID)\n\n self.lstm_char = nn.LSTM(self.char_embeds.embedding_dim,\n config.char_lstm_dim,\n num_layers=1, bidirectional=True, batch_first=True)\n if config.is_caps:\n self.lstm = nn.LSTM(self.word_embeds.embedding_dim + config.char_embd_dim * 2 + config.caps_embd_dim,\n config.word_lstm_dim,\n num_layers=1, bidirectional=True, batch_first=True)\n else:\n self.lstm = nn.LSTM(self.word_embeds.embedding_dim + config.char_embd_dim * 2,\n config.word_lstm_dim,\n num_layers=1, bidirectional=True, batch_first=True)\n\n self.dropout = nn.Dropout(config.dropout_rate)\n self.hidden_layer = nn.Linear(config.word_lstm_dim * 2, config.word_lstm_dim)\n self.tanh_layer = torch.nn.Tanh()\n\n self.hidden2tag = nn.Linear(config.word_lstm_dim, len(vocab.id_to_tag))\n\n self.config = config\n\n init_lstm_wt(self.lstm_char)\n init_lstm_wt(self.lstm)\n init_linear_wt(self.hidden_layer)\n init_linear_wt(self.hidden2tag)\n self.char_embeds.weight.data.uniform_(-1., 1.)\n if config.is_caps:\n self.caps_embeds.weight.data.uniform_(-1., 1.)\n\n def forward(self, batch):\n sentence = batch['words']\n lengths = batch['words_lens']\n\n if self.config.is_caps:\n caps = batch['caps']\n max_length = torch.max(lengths)\n char_emb = []\n word_embed = self.word_embeds(sentence)\n for chars, char_len in batch['chars']:\n seq_embed = self.char_embeds(chars)\n seq_lengths, sort_idx = torch.sort(char_len, descending=True)\n _, unsort_idx = torch.sort(sort_idx)\n seq_embed = seq_embed[sort_idx]\n packed = pack_padded_sequence(seq_embed, seq_lengths, batch_first=True)\n output, hidden = self.lstm_char(packed)\n lstm_feats, _ = pad_packed_sequence(output, batch_first=True)\n lstm_feats = lstm_feats.contiguous()\n b, t_k, d = list(lstm_feats.size())\n\n seq_rep = lstm_feats.view(b, t_k, 2, -1) #0 is fwd and 1 is bwd\n\n last_idx = char_len - 1\n seq_rep_fwd = seq_rep[unsort_idx, 0, 0]\n seq_rep_bwd = seq_rep[unsort_idx, last_idx, 1]\n\n seq_out = torch.cat([seq_rep_fwd, seq_rep_bwd], 1)\n # fill up the dummy char embedding for padding\n seq_out = F.pad(seq_out, (0, 0, 0, max_length - seq_out.size(0)))\n char_emb.append(seq_out.unsqueeze(0))\n\n char_emb = torch.cat(char_emb, 0) #b x n x c_dim\n\n if self.config.is_caps:\n caps_embd = self.caps_embeds(caps)\n word_embed = torch.cat([char_emb, word_embed, caps_embd], 2)\n else:\n word_embed = torch.cat([char_emb, word_embed], 2)\n word_embed = self.dropout(word_embed)\n\n lengths = lengths.view(-1).tolist()\n packed = pack_padded_sequence(word_embed, lengths, batch_first=True)\n output, hidden = self.lstm(packed)\n\n lstm_feats, _ = 
pad_packed_sequence(output, batch_first=True) # h dim = B x t_k x n\n lstm_feats = lstm_feats.contiguous()\n\n b, t_k, d = list(lstm_feats.size())\n\n h = self.hidden_layer(lstm_feats.view(-1, d))\n h = self.tanh_layer(h)\n logits = self.hidden2tag(h)\n logits = logits.view(b, t_k, -1)\n\n return logits\n\n def neg_log_likelihood(self, logits, y, s_lens):\n log_smx = F.log_softmax(logits, dim=2)\n loss = F.nll_loss(log_smx.transpose(1, 2), y, ignore_index=Constants.TAG_PAD_ID, reduction='none')\n loss = loss.sum(dim=1) / s_lens.float()\n loss = loss.mean()\n return loss\n\n def get_loss(self, logits, y, s_lens):\n loss = self.neg_log_likelihood(logits, y, s_lens)\n if self.config.is_l2_loss:\n loss += self.get_l2_loss()\n return loss\n\n def get_l2_loss(self):\n l2_reg = sum(p.norm(2) for p in self.parameters() if p.requires_grad)\n return self.config.reg_lambda * l2_reg\n\n def predict(self, logit, lengths):\n max_value, pred = torch.max(logit, dim=2)\n return pred\n\nclass NER_SOFTMAX_CHAR_CRF(nn.Module):\n def __init__(self, vocab, config):\n super(NER_SOFTMAX_CHAR_CRF, self).__init__()\n\n self.featurizer = NER_SOFTMAX_CHAR(vocab, config)\n self.crf = CRF_Loss(len(vocab.id_to_tag), config)\n self.config = config\n\n def get_l2_loss(self):\n l2_reg = sum(p.norm(2) for p in self.parameters() if p.requires_grad)\n return self.config.reg_lambda * l2_reg\n\n def forward(self, batch):\n emissions = self.featurizer(batch)\n return emissions\n\n def get_loss(self, logits, y, s_lens):\n if self.config.is_structural_perceptron_loss:\n loss = self.crf.structural_perceptron_loss(logits, y)\n else:\n loss = -1 * self.crf.log_likelihood(logits, y)\n\n loss = loss / s_lens.float()\n loss = loss.mean()\n if self.config.is_l2_loss:\n loss += self.get_l2_loss()\n return loss\n\n def predict(self, emissions, lengths):\n mask = get_mask(lengths, self.config)\n best_scores, pred = self.crf.viterbi_decode(emissions, mask)\n return pred\n"
] |
[
[
"torch.nn.Dropout",
"torch.max",
"torch.nn.functional.log_softmax",
"torch.nn.LSTM",
"torch.cat",
"torch.from_numpy",
"torch.nn.utils.rnn.pack_padded_sequence",
"torch.nn.Tanh",
"torch.nn.Linear",
"torch.nn.utils.rnn.pad_packed_sequence",
"torch.nn.Embedding.from_pretrained",
"torch.sort"
]
] |
inacmor/mobiledets-yolov4-pytorch
|
[
"db285fa061e997032a77fa1ee8c954f2eba2a84d",
"db285fa061e997032a77fa1ee8c954f2eba2a84d"
] |
[
"dataset/data_augmentation.py",
"model/net/body.py"
] |
[
"import cv2\nimport numpy as np\nimport random\n\n\ndef flip(img, box, mode):\n s = img.shape[0]\n\n if mode == 'hori' or 'vert':\n if random.random() > 0.5:\n if mode == 'hori':\n img = cv2.flip(img, 1)\n box[:, 1] = s - box[:, 1]\n box[:, 3] = s - box[:, 3]\n box = box[:, [0, 3, 2, 1, 4]]\n else:\n img = cv2.flip(img, 0)\n box[:, 2] = s - box[:, 2]\n box[:, 4] = s - box[:, 4]\n box = box[:, [0, 1, 4, 3, 2]]\n\n if mode == 'mix':\n if random.random() > 0.25:\n flip_id = [1, 0, -1]\n i = random.choice(flip_id)\n\n if i == 1:\n img = cv2.flip(img, 1)\n box[:, 1] = s - box[:, 1]\n box[:, 3] = s - box[:, 3]\n box = box[:, [0, 3, 2, 1, 4]]\n elif i == 0:\n img = cv2.flip(img, 0)\n box[:, 2] = s - box[:, 2]\n box[:, 4] = s - box[:, 4]\n box = box[:, [0, 1, 4, 3, 2]]\n else:\n img = cv2.flip(img, -1)\n box[:, 1] = s - box[:, 1]\n box[:, 3] = s - box[:, 3]\n box = box[:, [0, 3, 2, 1, 4]]\n box[:, 2] = s - box[:, 2]\n box[:, 4] = s - box[:, 4]\n box = box[:, [0, 1, 4, 3, 2]]\n\n return img, box\n\n\ndef rotate(img, box):\n\n s = img.shape[0]\n\n if random.random() > 0.33:\n\n times = [1, 3]\n time_id = random.choice(times)\n\n if time_id == 1:\n img = np.rot90(img)\n box = box[:, [0, 2, 3, 4, 1]]\n box[:, 2] = s - box[:, 2]\n box[:, 4] = s - box[:, 4]\n elif time_id == 3:\n img = np.rot90(np.rot90(np.rot90(img)))\n box = box[:, [0, 2, 3, 4, 1]]\n box[:, 1] = s - box[:, 1]\n box[:, 3] = s - box[:, 3]\n\n return img, box\n\n\ndef convert(img, contrast, bri_low, bri_up):\n\n contrast = random.uniform(1, contrast)\n bright = random.uniform(bri_low, bri_up)\n\n img = contrast * img + bright\n\n img = np.minimum(np.maximum(img, 0), 255)\n\n return img\n\n\ndef background_cutmix(img, box, clips, index, batchsize):\n backup = img.copy()\n\n size = img.shape[0]\n\n top_limit = np.max(box[:, 1])\n lef_limit = np.max(box[:, 0])\n bot_limit = np.max(box[:, 3])\n rig_limit = np.max(box[:, 2])\n\n top = backup[0:random.randint(0, top_limit), :, :]\n left = backup[:, 0:random.randint(0, lef_limit), :]\n bottom = backup[random.randint(bot_limit, size):size, :, :]\n right = backup[:, random.randint(rig_limit, size):size, :]\n\n top_shape = (random.randint(0, top_limit), random.randint(size / 2, size))\n left_shape = (random.randint(size / 2, size), random.randint(0, lef_limit))\n bot_shape = (random.randint(0, size - bot_limit), random.randint(size / 2, size))\n rig_shape = (random.randint(size / 2, size), random.randint(0, size - rig_limit),)\n\n if index < batchsize - 1:\n clips.append((top, left, bottom, right))\n if index == 0:\n return img, clips\n else:\n top_c, left_c, bottom_c, right_c = clips[index - 1]\n\n else:\n top_c = np.hstack((cv2.resize(clips[0][0], top_shape),\n cv2.resize(clips[1][0], top_shape),\n cv2.resize(clips[2][0], top_shape)))\n left_c = np.vstack((cv2.resize(clips[0][1], left_shape),\n cv2.resize(clips[1][1], left_shape),\n cv2.resize(clips[2][1], left_shape)))\n bottom_c = np.hstack((cv2.resize(clips[0][2], bot_shape),\n cv2.resize(clips[1][2], bot_shape),\n cv2.resize(clips[2][2], bot_shape)))\n right_c = np.vstack((cv2.resize(clips[0][3], rig_shape),\n cv2.resize(clips[1][3], rig_shape),\n cv2.resize(clips[2][3], rig_shape)))\n\n top_c = cv2.resize(top_c, (top_shape[1], top_shape[0]))\n left_c = cv2.resize(left_c, (left_shape[1], left_shape[0]))\n bottom_c = cv2.resize(bottom_c, (bot_shape[1], bot_shape[0]))\n right_c = cv2.resize(right_c, (rig_shape[1], rig_shape[0]))\n\n top_startx = random.randint(0, size - top_shape[1])\n top_starty = random.randint(0, top_limit - top_shape[0])\n 
left_startx = random.randint(0, lef_limit - left_shape[1])\n left_starty = random.randint(0, size - left_shape[0])\n bottom_startx = random.randint(0, size - bot_shape[1])\n bottom_starty = random.randint(bot_limit, size - bot_shape[0])\n right_startx = random.randint(rig_limit, size - rig_shape[1])\n right_starty = random.randint(0, size - rig_shape[0])\n\n img[top_starty:top_starty + top_shape[0], top_startx:top_startx + top_shape[1], :] = top_c\n img[left_starty:left_starty + left_shape[0], left_startx:left_startx + left_shape[1], :] = left_c\n\n img[bottom_starty:bottom_starty + bot_shape[0], bottom_startx:bottom_startx + bot_shape[1], :] = bottom_c\n img[right_starty:right_starty + rig_shape[0], right_startx:right_startx + rig_shape[1], :] = right_c\n\n return img, clips\n\ndef gasuss_noise(image, mean=0, var=0.001):\n\n image = np.array(image/255, dtype=float)\n\n noise = np.random.normal(mean, var ** 0.5, image.shape)\n\n out = image + noise\n\n if out.min() < 0:\n\n low_clip = -1.\n\n else:\n\n low_clip = 0.\n\n out = np.clip(out, low_clip, 1.0)\n\n out = np.uint8(out*255)\n\n return out",
"#encoding=utf-8\n\n'''\n@Time : 2020/11/17 08:40\n@Author : Inacmor\n@File : body.py\n@Noice :\n@Modificattion :\n @Author :\n @Time :\n @Detail :\n\n'''\n\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch\nfrom model.net.backbone import Conv_BN_Act, CSPDarknet53\nfrom model.net.csp_mobiledets import CSPMobileDets\n\n\nclass SPP(nn.Module):\n\n def __init__(self, in_channels):\n super(SPP, self).__init__()\n\n self.spp_head = Conv_BN_Act(in_channels, in_channels // 2,\n kernel_size=1, stride=1, activation='leaky')\n\n # #SPP\n self.spp_body1 = nn.MaxPool2d(kernel_size=13, stride=1, padding=13 // 2)\n self.spp_body2 = nn.MaxPool2d(kernel_size=9, stride=1, padding=9 // 2)\n self.spp_body3 = nn.MaxPool2d(kernel_size=5, stride=1, padding=5 // 2)\n\n def forward(self, x):\n\n head = self.spp_head(x)\n p1 = self.spp_body1(head)\n p2 = self.spp_body2(head)\n p3 = self.spp_body3(head)\n\n output = torch.cat([head, p1, p2, p3], 1)\n\n return output\n\nclass FiveStr(nn.Module):\n\n def __init__(self, inchannels):\n super(FiveStr, self).__init__()\n\n self.five_str = nn.ModuleList([Conv_BN_Act(inchannels, inchannels // 2,\n kernel_size=1, stride=1, activation='leaky'),\n Conv_BN_Act(inchannels // 2, inchannels,\n kernel_size=3, stride=1, activation='leaky'),\n Conv_BN_Act(inchannels, inchannels // 2,\n kernel_size=1, stride=1, activation='leaky'),\n Conv_BN_Act(inchannels // 2, inchannels,\n kernel_size=3, stride=1, activation='leaky'),\n Conv_BN_Act(inchannels, inchannels // 2,\n kernel_size=1, stride=1, activation='leaky')])\n\n def forward(self, x):\n\n for layer in self.five_str:\n x = layer(x)\n\n return x\n\nclass OUTPUT(nn.Module):\n\n def __init__(self, in_channels, per_anchors, outs):\n super(OUTPUT, self).__init__()\n\n self.conv1 = Conv_BN_Act(in_channels, in_channels * 2, 3, 1, 'leaky')\n self.conv2 = nn.Conv2d(in_channels * 2, per_anchors * outs, kernel_size=1, stride=1)\n\n def forward(self, x):\n\n x = self.conv1(x)\n x = self.conv2(x)\n\n return x\n\nclass UPSAMPLE(nn.Module):\n\n def __init__(self, in_channels):\n super(UPSAMPLE, self).__init__()\n\n self.conv1 = Conv_BN_Act(in_channels, in_channels // 2, kernel_size=1, stride=1, activation='leaky')\n\n def forward(self, x, up_times, inference=False):\n x = self.conv1(x)\n\n x = F.interpolate(x, size=(x.size(2) * up_times, x.size(3) * up_times), mode='nearest')\n\n return x\n\nclass DOWNSAMPLE(nn.Module):\n\n def __init__(self, in_channels):\n super(DOWNSAMPLE, self).__init__()\n\n self.down = Conv_BN_Act(in_channels, in_channels * 2, kernel_size=3, stride=2, activation='leaky')\n\n def forward(self, x):\n x = self.down(x)\n\n return x\n\nclass PANUP1(nn.Module):\n\n def __init__(self, in_channels):\n super(PANUP1, self).__init__()\n\n self.conv1 = Conv_BN_Act(in_channels, in_channels // 2, 1, 1, 'leaky')\n\n self.conv2 = Conv_BN_Act(in_channels // 2, in_channels, 3, 1, 'leaky')\n\n self.spp = SPP(in_channels)\n\n self.conv3 = Conv_BN_Act(in_channels * 2, in_channels // 2, 1, 1, 'leaky')\n\n self.conv4 = Conv_BN_Act(in_channels // 2, in_channels, 3, 1, 'leaky')\n\n self.conv5 = Conv_BN_Act(in_channels, in_channels // 2, 1, 1, 'leaky')\n\n self.upsample = UPSAMPLE(in_channels // 2)\n\n def forward(self, x, uptimes):\n\n x = self.conv1(x)\n x = self.conv2(x)\n x = self.spp(x)\n x = self.conv3(x)\n x = self.conv4(x)\n x = self.conv5(x)\n\n up_input = x\n up_output = self.upsample(up_input, uptimes)\n\n return x, up_output\n\nclass PANUP2(nn.Module):\n\n def __init__(self, in_channels):\n super(PANUP2, 
self).__init__()\n\n self.conv1 = Conv_BN_Act(in_channels, in_channels // 2, 1, 1, 'leaky')\n\n self.five_s = FiveStr(in_channels)\n\n self.upsample = UPSAMPLE(in_channels // 2)\n\n def forward(self, x, up1, uptimes):\n\n x = self.conv1(x)\n\n x = torch.cat([x, up1], 1)\n\n x = self.five_s(x)\n\n up_input = x\n up_output = self.upsample(up_input, uptimes)\n\n return x, up_output\n\nclass PANDOWN1(nn.Module):\n\n def __init__(self, in_channels):\n super(PANDOWN1, self).__init__()\n\n self.conv1 = Conv_BN_Act(in_channels, in_channels // 2, 1, 1, 'leaky')\n self.conv2 = FiveStr(in_channels)\n self.down1 = DOWNSAMPLE(in_channels // 2)\n\n def forward(self, x, up2):\n\n x = self.conv1(x)\n\n x = torch.cat([x, up2], 1)\n x = self.conv2(x)\n\n down1_input = x\n down1_input = self.down1(down1_input)\n\n return x, down1_input\n\nclass PANDOWN2(nn.Module):\n\n def __init__(self, in_channels):\n super(PANDOWN2, self).__init__()\n\n self.five_s = FiveStr(in_channels)\n\n self.down2 = DOWNSAMPLE(in_channels // 2)\n\n def forward(self, x, down1_output):\n\n x = torch.cat([x, down1_output], 1)\n x = self.five_s(x)\n\n down2_input = x\n\n down2_output = self.down2(down2_input)\n\n return x, down2_output\n\n\nclass YOLOBODY(nn.Module):\n\n def __init__(self, in_channels, anchors, num_bbparas, num_classes, freeze=False):\n super(YOLOBODY, self).__init__()\n\n self.backbone = CSPMobileDets(freeze=freeze)\n self.panup1 = PANUP1(in_channels)\n self.five_d32 = FiveStr(in_channels)\n self.outs1 = OUTPUT(in_channels // 2,\n per_anchors=anchors // 3,\n outs=num_bbparas + 1 + num_classes)\n\n self.panup2 = PANUP2(in_channels // 2)\n self.pandown1 = PANDOWN1(in_channels // 4)\n self.outs2 = OUTPUT(in_channels // 4,\n per_anchors=anchors // 3,\n outs=num_bbparas + 1 + num_classes)\n\n self.pandown2 = PANDOWN2(in_channels // 2)\n self.outs3 = OUTPUT(in_channels // 8,\n per_anchors=anchors // 3,\n outs=num_bbparas + 1 + num_classes)\n\n def forward(self, inputs, uptimes1=2, uptimes2=2):\n\n darknet_out1, darknet_out2, darknet_out3 = self.backbone(inputs)\n\n d32, up1_output = self.panup1(darknet_out1, uptimes1)\n\n d16, up2_output = self.panup2(darknet_out2, up1_output, uptimes2)\n\n d08, down1_output = self.pandown1(darknet_out3, up2_output)\n\n d16, down2_output = self.pandown2(d16, down1_output)\n\n d32 = torch.cat([d32, down2_output], 1)\n d32 = self.five_d32(d32)\n\n feats01 = self.outs1(d32)\n feats02 = self.outs2(d16)\n feats03 = self.outs3(d08)\n\n return [feats01, feats02, feats03]\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
] |
[
[
"numpy.rot90",
"numpy.maximum",
"numpy.clip",
"numpy.uint8",
"numpy.max",
"numpy.random.normal",
"numpy.array"
],
[
"torch.nn.MaxPool2d",
"torch.nn.Conv2d",
"torch.cat"
]
] |
SenHuang19/EnergyPlus-Volttron-Toolkit
|
[
"f89e68dc143e9ac01c2b07e975d21d64716bf876"
] |
[
"volttron/platform/agent/base_market_agent/poly_line.py"
] |
[
"# -*- coding: utf-8 -*- {{{\n# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:\n#\n# Copyright 2017, Battelle Memorial Institute.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# This material was prepared as an account of work sponsored by an agency of\n# the United States Government. Neither the United States Government nor the\n# United States Department of Energy, nor Battelle, nor any of their\n# employees, nor any jurisdiction or organization that has cooperated in the\n# development of these materials, makes any warranty, express or\n# implied, or assumes any legal liability or responsibility for the accuracy,\n# completeness, or usefulness or any information, apparatus, product,\n# software, or process disclosed, or represents that its use would not infringe\n# privately owned rights. Reference herein to any specific commercial product,\n# process, or service by trade name, trademark, manufacturer, or otherwise\n# does not necessarily constitute or imply its endorsement, recommendation, or\n# favoring by the United States Government or any agency thereof, or\n# Battelle Memorial Institute. The views and opinions of authors expressed\n# herein do not necessarily state or reflect those of the\n# United States Government or any agency thereof.\n#\n# PACIFIC NORTHWEST NATIONAL LABORATORY operated by\n# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY\n# under Contract DE-AC05-76RL01830\n# }}}\n\nimport numpy as np\n\n\nclass PolyLine:\n def __init__(self):\n self.points = []\n self.xs = None\n self.ys = None\n self.xsSortedByY = None\n self.ysSortedByY = None\n self._min_x = None\n self._max_x = None\n self._min_y = None\n self._max_y = None\n\n def add(self, point):\n if self.points is None:\n self.points = []\n if len(self.points) > 0:\n for p in reversed(self.points):\n if p.x == point.x and p.y == point.y:\n return\n doSort = False\n # if len(self.points) > 0 and point.y < self.points[-1].y:\n if len(self.points) > 0 and point.x < self.points[-1].x:\n doSort = True\n\n self.points.append(point)\n if doSort:\n self.points.sort()\n self.xs = None\n self.ys = None\n if point.x is not None and point.y is not None:\n self._min_x = PolyLine.min(self._min_x, point.x)\n self._min_y = PolyLine.min(self._min_y, point.y)\n self._max_x = PolyLine.max(self._max_x, point.x)\n self._max_y = PolyLine.max(self._max_y, point.y)\n\n def contains_none(self):\n result = False\n if self.points is not None and len(self.points) > 0:\n for p in self.points:\n if p.x is None or p.y is None:\n result = True\n return result\n\n @staticmethod\n def min(x1, x2):\n if x1 is None:\n return x2\n if x2 is None:\n return x1\n return min(x1, x2)\n\n @staticmethod\n def max(x1, x2):\n if x1 is None:\n return x2\n if x2 is None:\n return x1\n return max(x1, x2)\n\n @staticmethod\n def sum(x1, x2):\n if x1 is None:\n return x2\n if x2 is None:\n return x1\n return x1 + x2\n\n def x(self, y, left=None, right=None):\n if not self.points:\n return None\n if y is None:\n return None\n self.vectorize()\n # return 
np.interp(y, self.ys, self.xs) #, right=0.) .. we learned that this gave weird results previously\n # ascending = self.ys[0]<self.ys[-1]\n # ys = self.ys if ascending else self.ys[::-1]\n # xs = self.xs if ascending else self.xs[::-1]\n r = np.interp(y, self.ysSortedByY, self.xsSortedByY, left=left, right=right)\n return None if np.isnan(r) else r\n\n def y(self, x, left=None, right=None):\n if not self.points:\n return None\n if x is None:\n return None\n self.vectorize()\n # return np.interp(x, self.xs, self.ys) # this probably doesn't work b/c the xs are not neccesarily in the right order...\n # ascending = self.xs[0]<self.xs[-1]\n # ys = self.ys if ascending else self.ys[::-1]\n # xs = self.xs if ascending else self.xs[::-1]\n r = np.interp(x, self.xs, self.ys, left=left, right=right)\n return None if np.isnan(r) else r\n\n # probably replace w/ zip()\n def vectorize(self):\n if not self.points:\n return None, None\n if (self.xs == None or self.ys == None):\n xs = [None] * len(self.points)\n ys = [None] * len(self.points)\n c = 0\n for p in self.points:\n xs[c] = p.x\n ys[c] = p.y\n c += 1\n self.xs = xs\n self.ys = ys\n if self.ys[0] < self.ys[-1]:\n self.xsSortedByY = self.xs\n self.ysSortedByY = self.ys\n else:\n self.xsSortedByY = self.xs[::-1]\n self.ysSortedByY = self.ys[::-1]\n return self.xs, self.ys\n\n def tuppleize(self):\n if not self.points:\n return None\n ps = [None] * len(self.points)\n c = 0\n for p in self.points:\n ps[c] = p.tuppleize()\n c += 1\n return ps\n\n def min_y(self):\n return self._min_y\n\n def max_y(self):\n return self._max_y\n\n def min_x(self):\n return self._min_x\n\n def max_x(self):\n return self._max_x\n\n @staticmethod\n def determinant(point1, point2):\n return point1[0] * point2[1] - point1[1] * point2[0]\n\n @staticmethod\n def segment_intersection(line1, line2):\n xdiff = (line1[0][0] - line1[1][0], line2[0][0] - line2[1][0])\n ydiff = (line1[0][1] - line1[1][1], line2[0][1] - line2[1][1])\n div = PolyLine.determinant(xdiff, ydiff)\n if div == 0:\n return None, None\n d = (PolyLine.determinant(*line1), PolyLine.determinant(*line2))\n x = PolyLine.determinant(d, xdiff) / div\n y = PolyLine.determinant(d, ydiff) / div\n return x, y\n\n @staticmethod\n def ccw(p1, p2, p3):\n return (p3[1] - p1[1]) * (p2[0] - p1[0]) > (p2[1] - p1[1]) * (p3[0] - p1[0])\n\n @staticmethod\n def segment_intersects(l1, l2):\n if l1[0][0] is None or l1[0][1] is None or l1[1][0] is None or l1[1][1] is None:\n return False\n if l2[0][0] is None or l2[0][1] is None or l2[1][0] is None or l2[1][1] is None:\n return False\n if (PolyLine.ccw(l1[0], l2[0], l2[1]) != PolyLine.ccw(l1[1], l2[0], l2[1])\n and PolyLine.ccw(l1[0], l1[1], l2[0]) != PolyLine.ccw(l1[0], l1[1], l2[1])):\n return True\n if (l1[0][0] == l2[0][0] and l1[0][1] == l2[0][1]) or (l1[0][0] == l2[1][0] and l1[0][1] == l2[1][1]):\n return True\n if (l1[1][0] == l2[0][0] and l1[1][1] == l2[0][1]) or (l1[1][0] == l2[1][0] and l1[1][1] == l2[1][1]):\n return True\n\n @staticmethod\n def between(a, b, c):\n if (a[0] is None or a[1] is None or b[0] is None or b[1] is None or c[0] is None or c[1] is None):\n return None\n crossproduct = (c[1] - a[1]) * (b[0] - a[0]) - (c[0] - a[0]) * (b[1] - a[1])\n if abs(crossproduct) > 1e-12:\n return False\n dotproduct = (c[0] - a[0]) * (b[0] - a[0]) + (c[1] - a[1]) * (b[1] - a[1])\n if dotproduct < 0:\n return False\n squaredlengthba = (b[0] - a[0]) * (b[0] - a[0]) + (b[1] - a[1]) * (b[1] - a[1])\n if dotproduct > squaredlengthba:\n return False\n return True\n\n @staticmethod\n 
def intersection(pl_1, pl_2):\n pl_1 = pl_1.points\n pl_2 = pl_2.points\n\n # we have two points\n if len(pl_1) == 1 and len(pl_2) == 1:\n if pl_1[0][0] == pl_2[0][0] and pl_1[0][1] == pl_2[0][1]:\n quantity = pl_1[0][0]\n price = pl_1[0][1]\n return quantity, price\n\n # we have one point and line segments\n elif len(pl_1) == 1 or len(pl_2) == 1:\n if len(pl_1) == 1:\n point = pl_1[0]\n line = pl_2\n else:\n point = pl_2[0]\n line = pl_1\n for j, pl_2_1 in enumerate(line[:-1]):\n pl_2_2 = line[j + 1]\n if PolyLine.between(pl_2_1, pl_2_2, point):\n quantity = point[0]\n price = point[1]\n return quantity, price\n\n # we have line segments\n elif len(pl_1) > 1 and len(pl_2) > 1:\n for i, pl_1_1 in enumerate(pl_1[:-1]):\n pl_1_2 = pl_1[i + 1]\n for j, pl_2_1 in enumerate(pl_2[:-1]):\n pl_2_2 = pl_2[j + 1]\n if PolyLine.segment_intersects((pl_1_1, pl_1_2), (pl_2_1, pl_2_2)):\n quantity, price = PolyLine.segment_intersection((pl_1_1, pl_1_2), (pl_2_1, pl_2_2))\n return quantity, price\n\n # The lines don't intersect, add the auxillary information\n quantity = None\n price = None\n return quantity, price\n\n @staticmethod\n def compare(demand_curve, supply_curve):\n aux = {}\n demand_max_quantity = demand_curve.max_x()\n demand_min_quantity = demand_curve.min_x()\n supply_max_quantity = supply_curve.max_x()\n supply_min_quantity = supply_curve.min_x()\n demand_max_price = demand_curve.max_y()\n demand_min_price = demand_curve.min_y()\n supply_max_price = supply_curve.max_y()\n supply_min_price = supply_curve.min_y()\n\n aux['SQn,DQn'] = cmp(supply_min_quantity,demand_min_quantity)\n aux['SQn,DQx'] = cmp(supply_min_quantity,demand_max_quantity)\n aux['SQx,DQn'] = cmp(supply_max_quantity,demand_min_quantity)\n aux['SQx,DQx'] = cmp(supply_max_quantity,demand_max_quantity)\n\n aux['SPn,DPn'] = cmp(supply_min_price,demand_min_price)\n aux['SPn,DPx'] = cmp(supply_min_price,demand_max_price)\n aux['SPx,DPn'] = cmp(supply_max_price,demand_min_price)\n aux['SPx,DPx'] = cmp(supply_max_price,demand_max_price)\n return aux\n\n\n"
] |
[
[
"numpy.isnan",
"numpy.interp"
]
] |
SIGKDDanon/SIGKDD2021DeAnonV2
|
[
"76f0373ec42ab55feefed3f4ce4bf4d532b51dd2",
"76f0373ec42ab55feefed3f4ce4bf4d532b51dd2"
] |
[
"PostDiffMixture/simulations_folder/simulation_analysis_scripts/checkpickle_EFFECT_new.py",
"PostDiffMixture/simulations_folder/simulation_scripts/run_effect_size_simulations_beta.py"
] |
[
"import matplotlib\nmatplotlib.use('Agg')\n#matplotlib.use(\"gtk\")\n#matplotlib.use('Qt5Agg')\nfrom table_functions import *\nimport pickle\nimport os\nimport pandas as pd \nimport matplotlib.pyplot as plt \nimport sys\n#sys.path.insert()\n# print(data)\nimport numpy as np\nimport os\nfrom scipy import stats\nfrom matplotlib.pyplot import figure\nimport glob\nimport numpy as np\nfrom hist_functions import *\nimport scipy.stats\nfrom pathlib import Path\n # ipdb.set_trace()\nimport ipdb\nfrom scatter_plot_functions import *\nfrom rectify_vars_and_wald_functions import *\nfrom checkpickle import stacked_bar_plot_with_cutoff, process_model_dir_list, get_metric_numsteps\n#from checkpickle import *\nfrom phi_functions import *\n\n\nTABLE_3_KEY_UR = \"UR-c={}n={}num_steps={}\"\nTABLE_3_KEY_TS = \"TS-c={}n={}num_steps={}\"\nTABLE_3_KEY_TSPPD = \"TSPPD-c={}n={}num_steps={}\"\nTABLE_3_KEY_ETS = \"ETS-c={}n={}num_steps={}\"\nTABLE_3_KEY_PPDG = \"PPDG-c={}n={}num_steps={}\"\nTABLE_3_KEY_EG = \"EG-c={}n={}num_steps={}\"\n\nSMALL_SIZE = 13\nMEDIUM_SIZE = 10\nBIGGER_SIZE = 14\n\nplt.rc('font', size=SMALL_SIZE) # controls default text sizes\nplt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title\nplt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels\nplt.rc('xtick', labelsize=8.5) # fontsize of the tick labels\nplt.rc('ytick', labelsize=8.5) # fontsize of the tick labels\nplt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize\nplt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title\n\n\n\ndef plot_hist_and_table(df_for_num_steps_eg0pt1, df_for_num_steps_eg0pt3, df_for_num_steps_ts, df_for_num_steps_unif, num_steps, epsilon, n):\n fig_h, ax_h = plt.subplots()\n proportions_unif = df_for_num_steps_unif['sample_size_1'] / num_steps\n proportions_eg0pt1 = df_for_num_steps_eg0pt1['sample_size_1'] / num_steps\n proportions_eg0pt3 = df_for_num_steps_eg0pt3['sample_size_1'] / num_steps\n proportions_ts = df_for_num_steps_ts['sample_size_1'] / num_steps\n \n ax_h.hist(proportions_eg0pt1, alpha = 0.5, label = \"Epsilon Greedy 0.1\")\n ax_h.hist(proportions_eg0pt3, alpha = 0.5, label = \"Epsilon Greedy 0.3\")\n ax_h.hist(proportions_unif, alpha = 0.5, label = \"Uniform Random\")\n ax_h.hist(proportions_ts, alpha = 0.5, label = \"Thompson Sampling\")\n ax_h.legend()\n fig_h.suptitle(\"Histogram of Proportion of {} Participants Assigned to Condition 1 Across 500 Simulations\".format(num_steps))\n # rows = [\"Areferg\"]\n # columns = [\"Berger\"]\n # cell_text = [\"ergerg\"]\n # the_table = ax_h.table(cellText=cell_text,\n # rowLabels=rows,\n # colLabels=columns,\n # loc='right')\n\n # fig_h.subplots_adjust(left=0.2, wspace=0.4)\n data = np.random.uniform(0, 1, 80).reshape(20, 4)\n mean_ts = np.mean(proportions_ts)\n var_ts = np.var(proportions_ts)\n\n mean_eg0pt1 = np.mean(proportions_eg0pt1)\n mean_eg0pt3 = np.mean(proportions_eg0pt3)\n var_eg0pt1 = np.var(proportions_eg0pt1)\n var_eg0pt3 = np.var(proportions_eg0pt3)\n\n prop_lt_25_eg0pt1 = np.sum(proportions_eg0pt1 < 0.25) / len(proportions_eg0pt1)\n prop_lt_25_eg0pt3 = np.sum(proportions_eg0pt3 < 0.25) / len(proportions_eg0pt3)\n prop_lt_25_ts = np.sum(proportions_ts < 0.25) / len(proportions_ts)\n\n # prop_gt_25_lt_5_eg = np.sum(> proportions > 0.25) / len(proportions)\n # prop_gt_25_lt_5_ts = np.sum(> proportions_ts > 0.25) / len(proportions_ts)\n\n data = [[mean_ts, var_ts, prop_lt_25_ts],\\\n [mean_eg0pt1, var_eg0pt1, prop_lt_25_eg0pt1],\\\n [mean_eg0pt3, var_eg0pt3, prop_lt_25_eg0pt3]]\n\n\n final_data = [['%.3f' % 
j for j in i] for i in data] #<0.25, 0.25< & <0.5, <0.5 & <0.75, <0.75 & <1.0 \n #table.auto_set_font_size(False)\n # table.set_fontsize(7)\n # table.auto_set_column_width((-1, 0, 1, 2, 3))\n table = ax_h.table(cellText=final_data, colLabels=['Mean', 'Variance', 'prop < 0.25'], rowLabels = [\"Thompson Sampling\", \"Epsilon Greedy 0.1\", \"Epsilon Greedy 0.3\"], loc='bottom', cellLoc='center', bbox=[0.25, -0.5, 0.5, 0.3])\n table.auto_set_font_size(False)\n table.set_fontsize(7)\n table.auto_set_column_width((-1, 0, 1, 2, 3))\n\n # Adjust layout to make room for the table:\n #ax_h.tick_params(axis='x', pad=20)\n\n #fig_h.subplots_adjust(left=0.2, bottom=0.5)\n #fig_h.tight_layout()\n save_dir = \"../simulation_analysis_saves/histograms/ExploreAndExploit/N={}\".format(n)\n Path(save_dir).mkdir(parents=True, exist_ok=True)\n\n\n fig_h.savefig(save_dir + \"/condition_prop_n={}.png\".format(num_steps), bbox_inches = 'tight')\n fig_h.clf()\n\n\n\ndef stacked_bar_plot_with_cutoff_old(df_ts = None, df_eg0pt1 = None, df_eg0pt3 = None, df_unif = None, df_tsppd = None, n = None, num_sims = None, df_ets = None, \\\n title = None, bs_prop = 0.0,\\\n ax = None, ax_idx = None, epsilon = None, c = None):\n \n step_sizes = df_unif['num_steps'].unique()\n size_vars = [\"n/2\", \"n\", \"2*n\", \"4*n\"]\n t1_list_eg0pt1 = []\n t1_list_eg0pt3 = []\n \n c_idx_mapping= {0.08: 0, 0.1:1, 0.12:2}\n c_idx = c_idx_mapping[c]\n t1_list_unif = []\n t1_wald_list_unif = []\n var_list = []\n t1_list_ts = []\n t1_list_tsppd = []\n t1_list_ets = []\n for num_steps in step_sizes:\n \n df_for_num_steps_eg0pt1 = df_eg0pt1[df_eg0pt1['num_steps'] == num_steps].dropna()\n df_for_num_steps_eg0pt3 = df_eg0pt3[df_eg0pt3['num_steps'] == num_steps].dropna()\n df_for_num_steps_unif = df_unif[df_unif['num_steps'] == num_steps].dropna()\n df_for_num_steps_ts = df_ts[df_ts['num_steps'] == num_steps].dropna()\n df_for_num_steps_tsppd = df_tsppd[df_tsppd['num_steps'] == num_steps].dropna()\n df_for_num_steps_ets = df_ets[df_ets['num_steps'] == num_steps].dropna()\n #df_for_num_steps_unif = df_for_num_steps_unif.dropna()\n # bins = np.arange(0, 1.01, .025)\n\n plot_hist_and_table(df_for_num_steps_eg0pt1, df_for_num_steps_eg0pt3, df_for_num_steps_ts, df_for_num_steps_unif, num_steps, epsilon = epsilon, n=n)\n\n \n # print(num_replications)\n # if use_pval == True:\n\n # ipdb.set_trace()\n num_replications = len(df_for_num_steps_eg0pt1)\n #num_rejected_eg0pt1 = np.sum(df_for_num_steps_eg0pt1['pvalue'] < .05) #Epsilon Greedy\n num_rejected_eg0pt1 = np.sum(df_for_num_steps_eg0pt1['wald_pval'] < .05) #Epsilon Greedy\n #num_rejected_eg0pt1 = np.sum(wald_pval_eg0pt1 < .05) #Epsilon Greedy\n\n #num_rejected_eg0pt3 = np.sum(df_for_num_steps_eg0pt3['pvalue'] < .05) #Epsilon Greedy\n num_rejected_eg0pt3 = np.sum(df_for_num_steps_eg0pt3['wald_pval'] < .05) #Epsilon Greedy\n\n #num_rejected_ts = np.sum(df_for_num_steps_ts['pvalue'] < .05) #Thompson\n num_rejected_ts = np.sum(df_for_num_steps_ts['wald_pval'] < .05) #Thompson\n num_rejected_tsppd = np.sum(df_for_num_steps_tsppd['wald_pval'] < .05) #Thompson\n num_rejected_ets = np.sum(df_for_num_steps_ets['wald_pval'] < .05) #Thompson\n\n# num_rejected_unif = np.sum(df_for_num_steps_unif['pvalue'] < .05)\n num_rejected_unif = np.sum(df_for_num_steps_unif['wald_pval'] < .05)\n\n var = np.var(df_for_num_steps_unif['pvalue'] < .05)\n \n num_replications = len(df_for_num_steps_eg0pt1)\n print(\"num_replications eg0pt1\", num_replications)\n t1_eg0pt1 = num_rejected_eg0pt1 / num_replications\n num_replications 
= len(df_for_num_steps_eg0pt3)\n t1_eg0pt3 = num_rejected_eg0pt3 / num_replications\n\n num_replications = len(df_for_num_steps_ts)\n print(\"num_replications ts\", num_replications)\n t1_ts = num_rejected_ts / num_replications\n\n num_replications = len(df_for_num_steps_tsppd)\n print(\"num_replications tsppd\", num_replications)\n t1_tsppd = num_rejected_tsppd / num_replications\n\n num_replications = len(df_for_num_steps_ets)\n print(\"num_replications ets\", num_replications)\n t1_ets = num_rejected_ets / num_replications\n\n\n num_replications = len(df_for_num_steps_unif)\n print(\"num_replications unif\", num_replications)\n t1_unif =num_rejected_unif / num_replications\n \n t1_list_unif.append(t1_unif)\n t1_list_ts.append(t1_ts)\n t1_list_tsppd.append(t1_tsppd)\n t1_list_ets.append(t1_ets)\n \n t1_list_eg0pt1.append(t1_eg0pt1)\n t1_list_eg0pt3.append(t1_eg0pt3)\n var_list.append(var)\n \n t1_list_ts = np.array(t1_list_ts)\n t1_list_tsppd = np.array(t1_list_tsppd)\n t1_list_ets = np.array(t1_list_ets)\n ind = np.arange(3*len(step_sizes), step=3)\n # print(ind)\n # print(step_sizes)\n ax.set_xticks(ind)\n ax.set_xticklabels(step_sizes)\n \n print(\"var\", var_list)\n width = 0.3\n capsize = width*4\n width_total = 2*width\n \n \n t1_list_eg0pt1 = np.array(t1_list_eg0pt1)\n t1_list_eg0pt3 = np.array(t1_list_eg0pt3)\n t1_list_unif = np.array(t1_list_unif)\n \n num_sims_RL4RL = 5000\n t1_eg0pt1_se = stats.t.ppf(1-0.025, num_sims)*np.sqrt(t1_list_eg0pt1*(1-t1_list_eg0pt1)/num_sims_RL4RL) #95 CI for Proportion\n t1_eg0pt3_se = stats.t.ppf(1-0.025, num_sims)*np.sqrt(t1_list_eg0pt3*(1-t1_list_eg0pt3)/num_sims_RL4RL) #95 CI for Proportion\n \n t1_se_unif = stats.t.ppf(1-0.025, num_sims)*np.sqrt(t1_list_unif*(1-t1_list_unif)/num_sims_RL4RL)\n t1_se_ts = stats.t.ppf(1-0.025, num_sims)*np.sqrt(t1_list_ts*(1-t1_list_ts)/num_sims_RL4RL)\n num_sims_ppd = 5000\n t1_se_tsppd = stats.t.ppf(1-0.025, num_sims_ppd)*np.sqrt(t1_list_tsppd*(1-t1_list_tsppd)/num_sims_ppd)\n\n num_sims_ets = 5000\n t1_se_ets = stats.t.ppf(1-0.025, num_sims_ppd)*np.sqrt(t1_list_ets*(1-t1_list_ets)/num_sims_ets)\n\n print(t1_se_unif) #note that power goes to 1.0 for unif, thus error bars\n #print(t1_se_unif)\n p1 = ax.bar(ind, t1_list_eg0pt1, width = width, yerr = t1_eg0pt1_se, \\\n ecolor='black', capsize=capsize, color = 'yellow', edgecolor='black')\n \n p3 = ax.bar(ind+width, t1_list_eg0pt3, width = width, yerr = t1_eg0pt3_se, \\\n ecolor='black', capsize=capsize, color = 'green', edgecolor='black')\n \n p4 = ax.bar(ind+2*width, t1_list_ts, width = width, yerr = t1_se_ts, \n ecolor='black', capsize=capsize, color = 'blue', edgecolor='black') \n\n p5 = ax.bar(ind+4*width + c_idx*width, t1_list_tsppd, width = width, yerr = t1_se_tsppd, \n ecolor='black', capsize=capsize, color = 'purple', edgecolor='black') \n\n p6 = ax.bar(ind+3*width, t1_list_ets, width = width, yerr = t1_se_ets, \n ecolor='black', capsize=capsize, color = 'brown', edgecolor='black') \n\n \n p2 = ax.bar(ind-width, t1_list_unif, width = width,\\\n yerr = t1_se_unif, ecolor='black', \\\n capsize=capsize, color = 'red', \\\n edgecolor='black')\n if ax_idx == 2:\n # leg1 = ax.legend((p1[0], p2[0], p3[0], p4[0]), ('Epsilon Greedy Chi Squared 0.1', \"Uniform Chi Squared\", \"Epsilon Greedy Chi Squared 0.3\", \"Thompson Sampling Chi Squared\"), bbox_to_anchor=(1.0, 1.76))\n leg1 = ax.legend((p2[0], p1[0], p3[0], p4[0], p6[0], p5[0]), (\"Uniform Wald\", 'Epsilon Greedy 0.1 Wald', \"Epsilon Greedy 0.3 Wald\", \"Thompson Sampling Wald\", \"Epsilon 0.1 Thompson 
Sampling Wald\", \"PostDiff Thompson Sampling Wald \\n c left to right [0.08, 0.1, 0.12]\"), bbox_to_anchor=(1.0, 1.76)) \n # leg1 = ax.legend((p2[0], p1[0], p3[0], p4[0], p6[0], p5[0]), (\"Uniform Wald\", 'Epsilon Greedy 0.1 Wald', \"Epsilon Greedy 0.3 Wald\", \"Thompson Sampling Wald\", \"PPD c 0.1 Thompson Sampling Wald\", \"Epsilon 0.1 Thompson Sampling\"), bbox_to_anchor=(1.0, 1.76)) \n #leg1 = ax.legend((p2[0], p1[0], p3[0], p4[0]), (\"Uniform Chi Squared\", 'Epsilon Greedy Chi Squared 0.1', \"Epsilon Greedy Chi Squared 0.3\", \"Thompson Sampling Chi Squared\"), bbox_to_anchor=(1.0, 1.76)) \n #leg2 = ax.legend(loc = 2)\n \n ax.add_artist(leg1)\n # plt.tight_layout()\n # plt.title(title)\n# if ax_idx == 6 or ax_idx == 7 or ax_idx == 8:\n ax.set_xlabel(\"number of participants = \\n n/2, n, 2*n, 4*n\")\n ax.set_ylim(0, 1.0)\n# ax.set_ylim(0, 0.58)\n ax.axhline(y=0.80, linestyle='--')\n\n\n return [t1_list_unif, t1_list_eg0pt1, t1_list_ts] #returns [UR Eps_Greedy, TS], in this case, need to return for each step size, but only plotting for one bs, so save step size by model (4x2)\n\ndef parse_dir(root, root_cutoffs, num_sims, metric = \"Power&T1\", title = \"Power \\n Across {} Simulations\", ylabel = \"Power\", ymax = 1.0, num_es = 3):\n arm_prob= 0.5\n table_3_dict = {}\n table_3_dict_dummy = {}\n arm_prob_list = [0.2, 0.5, 0.8]\n num_es = 2\n if num_es ==3:\n es_list = [0.5, 0.3, 0.2, 0.1]\n n_list = [32, 88, 197, 785]\n fig, ax = plt.subplots(1, 4, figsize = (12,5))\n\n elif num_es == 2:\n es_list = [0.2, 0.1]\n n_list = [197, 785]\n fig, ax = plt.subplots(1, 2, figsize = (12,5))\n\n fig_www, ax_www = plt.subplots()#for phi\n #fig_p, ax_p = plt.subplots(2,1, figsize = (12,5))\n #fig_p, ax_p = plt.subplots(1,2, figsize = (10,5))\n fig_p, ax_p = plt.subplots(1,2)\n ax_p = ax_p.ravel()\n root_dir = root + \"/num_sims={}armProb={}\".format(5000, arm_prob)\n #fig.set_size_inches(17.5, 13.5)\n ax = ax.ravel()\n #ipdb.set_trace()\n num_sims_secb = 5000\n\n root_ts = \"../../../../banditalgorithms/src/RL4RLSectionB/simulation_saves/IsEffect_fixedbs_RL4RLMay8/num_sims={}armProb=0.5\".format(num_sims_secb)\n# root_ts = \"../../../RL4RLSectionB/simulation_saves/IsEffect_fixedbs_RL4RLMay8/num_sims={}armProb=0.5\".format(num_sims_secb)\n root_eg_old = \"../../../../banditalgorithms/src/RL4RLSectionB/simulation_saves/EpsilonGreedyIsEffect/num_sims={}armProb=0.5\".format(num_sims_secb)\n #root_eg = \"../../../RL4RLSectionB/simulation_saves/EpsilonGreedyIsEffect/num_sims={}armProb=0.5\".format(num_sims_secb)\n root_unif = \"../../../../banditalgorithms/src/RL4RLSectionB/simulation_saves/UniformIsEffect/num_sims={}armProb=0.5\".format(num_sims_secb)\n #root_unif = \"../../../RL4RLSectionB/simulation_saves/UniformIsEffect/num_sims={}armProb=0.5\".format(num_sims_secb)\n num_sims_ppd = 10000\n root_tsppd_rs = \"../simulation_saves/TSPPDIsEffectResampleFast/num_sims={}armProb=0.5\".format(num_sims_ppd)\n root_tsppd_rs_ne = \"../simulation_saves/TSPPDNoEffectResampleFast/num_sims={}armProb=0.5\".format(num_sims_ppd)\n root_tsppd_rs_5k = \"../simulation_saves/TSPPDIsEffectResampleFast/num_sims={}armProb=0.5\".format(5000)\n c = 0.1\n c_idx = 0\n c_list = [0.025, 0.05, 0.075, 0.1, 0.125, 0.15, 0.2, 0.3]\n c_list_labels = [\"c =eps=0.025\", \"c=eps=0.05\", \"c=eps=0.075\", \"c=eps=0.1\", \"c=eps=0.125\", \"c=eps=0.15\", \"c=eps=0.2\", \"c=0.3/eps=0.6\"]\n epsilon_list = [0.025, 0.05, 0.075, 0.1, 0.125, 0.15, 0.2, 0.6]\n j = 0\n root_ets = 
\"../simulation_saves/EpsilonTSIsEffect/num_sims={}armProb=0.5\".format(5000)\n root_ets = \"../simulation_saves/EpsilonTSIsEffect/num_sims={}armProb=0.5\".format(num_sims)\n root_ppdg = \"../simulation_saves/PPDGreedyIsEffectResampleFast/num_sims={}armProb=0.5\".format(num_sims) #This is fast, naming not consistent\n #\"EpsilonGreedyIsEffectFast/num_sims=10000armProb=0.5/es=0.1epsilon=0.1/\"\n root_eg = \"../simulation_saves/EpsilonGreedyIsEffectFast/num_sims={}armProb=0.5/\".format(num_sims) #This is fast, naming not consistent\n for c in c_list:\n j +=1\n epsilon = epsilon_list[j-1]\n i = 0\n for n in n_list:\n es = es_list[i]\n bs = 1\n es_dir_0pt1 = root_eg_old + \"/es={}epsilon={}/\".format(0.1, 0.1)\n es_dir_0pt3 = root_eg_old + \"/es={}epsilon={}/\".format(0.1, 0.3)\n ts_dir = root_ts + \"/es={}/\".format(es)\n ts_dir = root_tsppd_rs + \"/es={}c=0.0/\".format(es)\n unif_dir = root_unif + \"/es={}/\".format(es)\n tsppd_dir = root_dir + \"/es={}c={}/\".format(0.1, 0.1)\n tsppd_dir_rs = root_tsppd_rs + \"/es={}c={}/\".format(es, c)\n tsppd_dir_rs_ne = root_tsppd_rs_ne + \"/N={}c={}/\".format(785, c)\n if c == 0.3:\n tsppd_dir_rs = root_tsppd_rs_5k + \"/es={}c={}/\".format(es, c)\n ets_dir = root_ets + \"/es={}epsilon={}/\".format(es, epsilon)\n unif_dir = root_tsppd_rs + \"/es={}c={}/\".format(es, 1.0)\n\n if c <= 0.1:\n ppdg_dir = root_ppdg + \"/es={}c={}/\".format(es, 0.1)\n eg_dir = root_eg + \"/es={}epsilon={}/\".format(es, 0.1)\n else:\n ppdg_dir = root_ppdg + \"/es={}c={}/\".format(es, 0.2)\n eg_dir = root_eg + \"/es={}epsilon={}/\".format(es, 0.6)\n\n # ipdb.set_trace()\n to_check_eg0pt1 = glob.glob(es_dir_0pt1 + \"/*Prior*{}*{}Df.pkl\".format(bs,0.1))[0] #Has eg, 34 in 348!!\n assert(len(glob.glob(es_dir_0pt1 + \"/*Prior*{}*{}Df.pkl\".format(bs,0.1))) == 1)\n\n to_check_eg0pt3 = glob.glob(es_dir_0pt3 + \"/*Prior*{}*{}Df.pkl\".format(bs,0.1))[0] #Has eg, 34 in 348!!\n assert(len(glob.glob(es_dir_0pt3 + \"/*Prior*{}*{}Df.pkl\".format(bs,0.1))) == 1)\n \n# to_check_unif = glob.glob(unif_dir + \"/*Uniform*{}*{}*Df*.pkl\".format(bs, es))[0]\n to_check_unif = glob.glob(unif_dir + \"/*Prior*{}*{}*Df*.pkl\".format(bs, es))[0]\n assert(len(glob.glob(unif_dir + \"/*Prior*{}*{}*Df*.pkl\".format(bs, es))) == 1)\n\n to_check_ts = glob.glob(ts_dir + \"/*Prior*{}*{}Df*.pkl\".format(bs,es))[0] #Has eg, 34 in 348!!\n assert(len(glob.glob(ts_dir + \"/*Prior*{}*{}Df*.pkl\".format(bs,es))) == 1)\n\n to_check_tsppd = glob.glob(tsppd_dir + \"/*Prior*{}*{}Df.pkl\".format(bs,0.1))[0] #Has eg, 34 in 348!!\n assert(len(glob.glob(tsppd_dir + \"/*Prior*{}*{}Df.pkl\".format(bs,0.1))) == 1)\n\n# print(ets_dir)\n to_check_ets = glob.glob(ets_dir + \"/*Prior*{}*{}Df*.pkl\".format(bs,es))[0] #Has eg, 34 in 348!! 
\n assert(len(glob.glob(ets_dir + \"/*Prior*{}*{}Df*.pkl\".format(bs,es))) == 1)\n\n to_check_tsppd_rs = glob.glob(tsppd_dir_rs + \"/*Prior*{}*{}Df*.pkl\".format(bs,es))[0] #Has eg, 34 in 348!!\n assert(len(glob.glob(tsppd_dir_rs + \"/*Prior*{}*{}Df*.pkl\".format(bs,es))) == 1)\n\n to_check_tsppd_rs_ne = glob.glob(tsppd_dir_rs_ne + \"/*Prior*{}*{}Df*.pkl\".format(bs,785))[0] #Has eg, 34 in 348!!\n assert(len(glob.glob(tsppd_dir_rs_ne + \"/*Prior*{}*{}Df*.pkl\".format(bs, 785))) == 1)\n\n #------hists, tables etc\n with open(to_check_eg0pt1, 'rb') as f:\n df_eg0pt1 = pickle.load(f)\n with open(to_check_eg0pt3, 'rb') as f:\n df_eg0pt3 = pickle.load(f)\n \n with open(to_check_unif, 'rb') as f:\n df_unif = pickle.load(f)\n if to_check_ts != None:\n with open(to_check_ts, 'rb') as t:\n df_ts = pickle.load(t)\n with open(to_check_tsppd, 'rb') as f:\n df_tsppd = pickle.load(f)\n\n with open(to_check_ets, 'rb') as f:\n df_ets = pickle.load(f)\n\n\n with open(to_check_tsppd_rs, 'rb') as f:\n df_tsppd_rs = pickle.load(f)\n with open(to_check_tsppd_rs_ne, 'rb') as f:\n df_tsppd_rs_ne = pickle.load(f)\n\n \n # ipdb.set_trace()\n rect_key = \"TS\"\n rect_key = \"Drop NA\"\n rectify_vars_noNa(df_eg0pt1, alg_key = rect_key)\n rectify_vars_noNa(df_eg0pt3, alg_key = rect_key)\n rectify_vars_noNa(df_ts, alg_key = rect_key)\n rectify_vars_noNa(df_unif, alg_key = rect_key)\n \n assert np.sum(df_eg0pt1[\"wald_type_stat\"].isna()) == 0\n assert np.sum(df_eg0pt1[\"wald_pval\"].isna()) == 0\n \n\n#FROM HERE, make list of model dirs, pass into proccess_model_dir_list\n table_3_key_UR = TABLE_3_KEY_UR\n table_3_key_TS = TABLE_3_KEY_TS\n table_3_key_TSPPD = TABLE_3_KEY_TSPPD\n table_3_key_ETS = TABLE_3_KEY_ETS\n table_3_key_PPDG = TABLE_3_KEY_PPDG\n table_3_key_EG = TABLE_3_KEY_EG\n\n model_dir_list = [ts_dir, tsppd_dir_rs, ets_dir, unif_dir, ppdg_dir, eg_dir]\n model_key_list = [table_3_key_TS, table_3_key_TSPPD, table_3_key_ETS, table_3_key_UR, table_3_key_PPDG, table_3_key_EG]\n\n process_model_dir_list(model_dir_list = model_dir_list, model_key_list = model_key_list, c=c, n= n, es = es, table_3_dict = table_3_dict, metric = metric)\n\n if \"Power\" in metric:\n hline = 0.80\n if \"Reward\" in metric:\n hline = 0.5 + es/2\n if \"PropOpt\" in metric:\n hline = None\n else:\n hline = None\n next_df = stacked_bar_plot_with_cutoff(df_eg0pt1 = df_eg0pt1, df_eg0pt3 = df_eg0pt3, df_tsppd_rs = df_tsppd_rs, \\\n df_unif = df_unif, df_ts = df_ts, df_tsppd = df_tsppd, df_ets = df_ets,\\\n n = n, num_sims = num_sims,\n ax = ax[i], ax_idx = i, epsilon = epsilon, c=c, ymax = ymax, hline = hline, metric = metric, c_idx = c_idx, c_list = c_list, es = es, table_3_dict = table_3_dict_dummy)\n\n\n\n\n if \"Power\" in metric:\n table_key = \"Power\"\n elif \"Reward\" in metric:\n table_key = \"Reward\"\n elif \"PropOpt\" in metric:\n table_key = \"PropOpt\"\n elif \"PropEps\" in metric:\n table_key = \"PropEps\"\n\n df = pd.DataFrame(next_df, columns = [\"n/2\",\"n\",\"2n\",\"4n\"])\n df.index = [\"Thompson Sampling Wald\", \"Epsilon Thompson Sampling Wald\",\"Thompson Sampling PostDiff Wald\", \"Uniform Random Wald\"]\n #[UR, ppd, TS, ets]\n save_dir = \"../simulation_analysis_saves/Tables/{}/c_and_epsilon={}/N={}/\".format(table_key, c, n)\n\n Path(save_dir).mkdir(parents=True, exist_ok=True)\n df.to_csv(save_dir + \"/{}_n={}_numsims={}.csv\".format(table_key, n, num_sims)) \n\n title_cond1 = \"Proportion of Samples in Condition 1 For n = {} \\n Across {} Simulations \\n With Effect Size {} $p_1$ = {} $p_2$ = {}\".format(n, 
num_sims,es, 0.5 + es/2, 0.5 - es/2)\n\n if c == 0.1:\n hist_cond1(df_eg0pt1 = df_eg0pt1, df_eg0pt3 = df_eg0pt3,\\\n df_unif = df_unif, df_ts = df_ts, df_tsppd = df_tsppd, df_ets = df_ets,\\\n n = n, num_sims = num_sims,\\\n title = title_cond1)\n \n\n \n # ipdb.set_trace()\n\n title_scatter_ratio = \"Minimum Sample Size Ratio \\n Min($\\\\frac{n_1}{n_2}$, $\\\\frac{n_2}{n_1}$)\" + \" For n = {} \\n Across {} Simulations \\n No Effect $p_1$ = $p_2$ = 0.5\".format(n, num_sims)\n\n # scatter_ratio(to_check_eg0pt1 = to_check_eg0pt1, to_check_eg0pt3 = to_check_eg0pt3,\\\n # to_check_unif = to_check_unif, to_check_ts = to_check_ts,\\\n # title = title_scatter_ratio, \\\n # n = n, num_sims = num_sims)\n\n title_table = \"TODO\"\n# table_means_diff(df_ts = df_ts, df_eg0pt1 = df_eg0pt1, df_eg0pt3 = df_eg0pt3, df_unif = df_unif, n = n, num_sims = num_sims, \\\n# title = title_table)\n\n if c == 0.1 and n == 785 and 0:\n scatter_correlation_helper_outer(df_eg0pt1 = df_eg0pt1, df_eg0pt3 = df_eg0pt3,\\\n df_unif = df_unif, df_ts = df_ts, effect_size = es, \\\n n = n, num_sims = num_sims) #Title created in helper fn\n\n\n # title_pval = \"Chi Squared P value Disutrbtuion For n = {} \\n Across {} Simulations\".format(n, num_sims)\n \n \n # hist_pval(to_check = to_check_eg0pt1, to_check_unif = to_check_unif, to_check_ts = to_check_ts, n = n, num_sims = num_sims, load_df = True, \\\n # title = title_pval, plot = True)\n\n title_mean1 = \"Mean 1 ($\\hatp_1$ with MLE) Disutrbtuion For n = {} \\n Across {} Simulations \\n No Effect $p_1$ = $p_2$ = 0.5\".format(n, num_sims)\n\n title_mean2 = \"Mean 2 ($\\hatp_2$ with MLE) Disutrbtuion For n = {} \\n Across {} Simulations \\n No Effect $p_1$ = $p_2$ = 0.5\".format(n, num_sims)\n\n # hist_means_bias(to_check_eg0pt1 = to_check_eg0pt1, to_check_eg0pt3 = to_check_eg0pt3,\\\n # to_check_unif = to_check_unif, to_check_ts = to_check_ts,\\\n # title = title_mean1, \\\n # n = n, num_sims = num_sims, mean_key = \"mean_1\")\n #\n # hist_means_bias(to_check_eg0pt1 = to_check_eg0pt1, to_check_eg0pt3 = to_check_eg0pt3,\\\n # to_check_unif = to_check_unif, to_check_ts = to_check_ts,\\\n # title = title_mean2, \\\n # n = n, num_sims = num_sims, mean_key = \"mean_2\")\n\n title_diff = \"Difference in Means (|$\\hatp_1$ - $\\hatp_2$| with MLE) Disutrbtuion For n = {} \\n Across {} Simulations \\n No Effect $p_1$ = $p_2$ = 0.5\".format(n, num_sims)\n\n# hist_means_diff(df_eg0pt1 = df_eg0pt1, df_eg0pt3 = df_eg0pt3,\\\n# df_unif = df_unif, df_ts = df_ts,\\\n# title = title_diff, \\\n# n = n, num_sims = num_sims)\n\n title_imba = \"Sample Size Imbalance (|$\\\\frac{n_1}{n} - 0.5$|\"+\" Disutrbtuion For n = {} \\n Across {} Simulations \\n No Effect $p_1$ = $p_2$ = 0.5\".format(n, num_sims)\n\n# hist_imba(df_eg0pt1 = df_eg0pt1, df_eg0pt3 = df_eg0pt3,\\\n# df_unif = df_unif, df_ts = df_ts,\\\n# title = title_imba, \\\n# n = n, num_sims = num_sims)\n\n # title_wald = \"Wald Statistic Sampling Disutrbtuion For n = {} \\n Across {} Simulations \\n No Effect $p_1$ = $p_2$ = 0.5\".format(n, num_sims)\n\n # hist_wald(to_check_eg0pt1 = to_check_eg0pt1, to_check_eg0pt3 = to_check_eg0pt3,\\\n # to_check_unif = to_check_unif, to_check_ts = to_check_ts,\\\n # title = title_wald, \\\n # n = n, num_sims = num_sims)\n\n # title_kde = \"Wald Statistic KDE Sampling Disutrbtuion For n = {} \\n Across {} Simulations \\n No Effect $p_1$ = $p_2$ = 0.5\".format(n, num_sims)#\n\n # KDE_wald(to_check_eg0pt1 = to_check_eg0pt1, to_check_eg0pt3 = to_check_eg0pt3,\\\n # to_check_unif = to_check_unif, 
to_check_ts = to_check_ts,\\\n # title = title_kde, \\\n # n = n, num_sims = num_sims)\n\n title_ap1 = \"Arm 1 Assignment Probability Disutrbtuion For n = {} \\n Across {} Simulations \\n No Effect $p_1$ = $p_2$ = 0.5\".format(n, num_sims)\n title_ap2 = \"Arm 2 Assignment Probability Disutrbtuion For n = {} \\n Across {} Simulations \\n No Effect $p_1$ = $p_2$ = 0.5\".format(n, num_sims)\n\n actions_dir_ts = ts_dir + \"bbEqualMeansEqualPriorburn_in_size-batch_size={}-{}\".format(bs, bs)\n \n # probs_dict = calculate_assgn_prob_by_step_size(actions_root = actions_dir_ts, num_samples=1000, num_actions = 2, cached_probs={}, \n # prior = [1,1], binary_rewards = True, \\\n # config = {}, n = n,\\\n # num_sims = num_sims, batch_size = 1, no_effect = True)\n #\n #\n #\n # hist_assignprob(to_check_eg0pt1 = to_check_eg0pt1, to_check_eg0pt3 = to_check_eg0pt3,\\\n # to_check_unif = to_check_unif, to_check_ts = to_check_ts, ts_ap_df = probs_dict,\\\n # title = title_ap1, \\\n # n = n, num_sims = num_sims, mean_key_of_interest = \"mean_1\", mean_key_other = \"mean_2\")\n #\n #\n # hist_assignprob(to_check_eg0pt1 = to_check_eg0pt1, to_check_eg0pt3 = to_check_eg0pt3,\\\n # to_check_unif = to_check_unif, to_check_ts = to_check_ts, ts_ap_df = probs_dict,\\\n # title = title_ap2, \\\n # n = n, num_sims = num_sims, mean_key_of_interest = \"mean_2\", mean_key_other = \"mean_1\")\n #\n\n c_inc = [0.2, 0.15, 0.1, 0.05, 0.0, 0.025]\n if n == 785 and c in c_inc:\n plot_phi_www(df_tsppd_rs, num_sims, n, c, ax_www) #averaged over sims\n # plot_phi_www_multi(df_tsppd_rs, df_tsppd_rs_ne, num_sims, n, c, ax_www) #averaged over sims\n\n\n plot_phi(df_tsppd_rs, num_sims, n, c, ax_p[1], es = 0.1) #averaged over sims\n plot_phi(df_tsppd_rs_ne, num_sims, n, c, ax_p[0]) #averaged over sims\n ax_p[1].set_title(\"Effect Size = {} \\n $p_1^*$ = {} $p_2^*$ = {}\".format(es, 0.5 + es/2, 0.5 - es/2))\n\n ax_p[1].axhline(y=0.6, color='black', linestyle='--', linewidth = 0.5)\n ax_p[1].axhline(y=0.1, color='black', linestyle='--', linewidth = 0.5)\n\n ax_p[0].axhline(y=0.6, color='black', linestyle='--', linewidth = 0.5)\n ax_p[0].axhline(y=0.1, color='black', linestyle='--', linewidth = 0.5)\n # ax_p[0].plot(np.array([1/1, 1/393, 1/785, 1/3140]), color='black', linestyle='--')\n\n ax_p[0].set_title(\"Effect Size = {} \\n $p_1^*$ = $p_2^*$ = {}\".format(0, 0.5))\n ax[i].set_title(\"es = {}, n = {}\".format(es, n))\n ax[i].set_ylabel(ylabel)\n i += 1\n\n## df = pd.DataFrame(next_df, columns = [\"n/2\",\"n\",\"2n\",\"4n\"])\n## df.index = [\"Uniform Random Chi Squared\",\"Epsilon Greedy Chi Squared\", \"Thompson Sampling Chi Squared\"]\n##\n## save_dir = \"../simulation_analysis_saves/histograms/ExploreAndExploit/N={}\".format(n)\n##\n## Path(save_dir).mkdir(parents=True, exist_ok=True)\n##\n##\n## df.to_csv(save_dir + \"/Power_n={}_numsims={}.csv\".format(n, num_sims)) \n c_idx +=1\n \n fig_www.tight_layout(rect=[0, 0.03, 1, 0.72])\n save_dir_www = \"../simulation_analysis_saves/num_sims={}/phi_plots_WWW/IsEffect/\".format(num_sims)\n Path(save_dir_www).mkdir(parents=True, exist_ok=True)\n\n title_www = \"$\\hat \\phi$ Across {} Simulations \\n $\\phi$ := \\mathbb P($|p_1 - p_2| < c$) \\n $p_1^*$ = 0.55, $p_2^*$ = 0.45 \\n Effect Size = 0.1\".format(num_sims)\n# title_www = \"$\\hat \\phi$ Across {} Simulations $p_1 = p_2 = 0.5$ \\n $\\phi$ := p($|p_1 - p_2| < c$) \\n n = 785\".format(num_sims)\n \n fig_www.suptitle(title_www)\n fig_www.savefig(save_dir_www+ \"/\" + title_www +\".png\")\n\n fig_p.tight_layout(rect=[0, 0.03, 1, 
0.87])\n save_dir = \"../simulation_analysis_saves/num_sims={}/phi_plots/IsEffect/\".format(num_sims)\n Path(save_dir).mkdir(parents=True, exist_ok=True)\n\n #title_p = r\"$\\hat \\phi_t$ Across 10,000 Simulations\".format(num_sims) +\" \\n $\\phi_t$ := $\\mathbb{P}(|p_1 - p_2| < c)$\"\n title_p = r\"$\\hat \\phi_t$ Across 10,000 Simulations\" + \" \\n $\\phi_t$ := $\\mathbb{P}(|p_1 - p_2| < c)$\"\n# title_p = r'$s(t) = \\mathcal{A}\\mathrm{sin}(2 \\omega t)$'\n \n fig_p.suptitle(title_p)\n fig_p.savefig(save_dir + \"/\" + title_p +\".png\")\n\n title = title.format(num_sims)\n #ax[i].set_title(title, fontsize = 55)\n #i +=1\n #fig.suptitle(\"Type One Error Rates Across {} Simulations\".format(num_sims))\n fig.suptitle(title)\n #fig.tight_layout(rect=[0, 0.03, 1, 0.95])\n #handles, labels = ax[i-1].get_legend_handles_labels()\n \n #fig.legend(handles, labels, loc='upper right', prop={'size': 50})\n #fig.tight_layout()\n save_dir = \"../simulation_analysis_saves/num_sims={}/power_t1_plots\".format(num_sims)\n if not os.path.isdir(save_dir):\n os.mkdir(save_dir)\n# print(\"saving to \", \"plots/{}.png\".format(title))\n \n #fig.set_tight_layout(True)\n fig.tight_layout()\n fig.subplots_adjust(top=.8)\n \n\n fig.savefig(save_dir + \"/{}_num_es={}.svg\".format(title, num_es), bbox_inches = 'tight')\n # plt.show()\n fig.clf()\n plt.close(fig)\n convert_table3_dict_totable_loop(table_3_dict, c_list = c_list, n_list = n_list, num_sims = num_sims, es_list = es_list, metric = metric, c_list_labels = c_list_labels, keys_list = model_key_list)\n #convert_table3_dict_totable(table_3_dict, c_list = c_list, n_list = n_list, num_sims = num_sims, es_list = es_list, metric = metric, c_list_labels = c_list_labels)\n\n\n \nif __name__ == \"__main__\":\n root = \"../simulation_saves/TSPPDIsEffect\"\n #parse_dir(root, root_cutoffs)\n num_sims = 500\n num_sims = 5000\n num_sims = 10000\n parse_dir(root, root, num_sims)\n\n",
"'''\nCreated on Jul 21, 2017\n\n@: a\n'''\nimport argparse \nimport math\nimport random\nimport sys\nimport time\n\nimport scipy.stats\nsys.path.insert(1, '../../le_experiments/')\nimport beta_bernoulli\nimport effect_size_sim_output_viz\nfrom forced_actions import forced_actions\nimport generate_single_bandit\nimport get_assistments_rewards\nimport numpy as np\nimport pandas as pd\nimport reorder_samples_in_rewards\nimport run_effect_size_simulations\nimport statsmodels.stats.api as sms\nimport statsmodels.stats.power as smp\nimport thompson_policy\n\n\nDESIRED_POWER = 0.8\nDESIRED_ALPHA = 0.05\n\nPRIOR_PROPORTION_DIFFERENCE = .5 # When making arms that don't have the prior in the center what proportion of the remaining space to use for the arms\n\naction_header ='AlgorithmAction'\nobs_reward_header = 'ObservedRewardofAction'\n\ndef calculate_by_trial_statistics_from_sims(outfile_directory, num_sims, step_sizes, effect_size, alpha = 0.05):\n '''\n Similar to the non-by_trial version, but adds in a column for trial number and repeatedly analyzes the first\n n steps of each trial, for n = 1:N.\n '''\n rows =[]\n for num_steps in step_sizes: \n for i in range(num_sims):\n output_file = pd.read_csv(get_output_filename(outfile_directory, num_steps, i),skiprows=1)\n for step in range(effect_size_sim_output_viz.TRIAL_START_NUM, num_steps + 1):\n cur_data = output_file[:step]\n cur_row = make_stats_row_from_df(cur_data, False)\n cur_row['num_steps'] = num_steps\n cur_row['sim'] = i\n cur_row['trial'] = step \n rows.append(cur_row)\n # Now do some aggregating\n df = pd.DataFrame(rows, columns=['num_steps', 'sim', 'trial', 'sample_size_1', 'sample_size_2', 'mean_1','mean_2', 'total_reward', 'ratio', 'actual_es', 'stat', 'pvalue', 'df'])\n \n return df\n\n\n\ndef create_arm_stats_by_step(outfile_directory, num_sims, num_steps, num_arms = 2):\n '''\n Creates a data frame that has the average arm estimates for each arm at each point in the simulation.\n Includes the estimated mean and variance for each arm, as well as what simulation and trial we're on.\n '''\n success_template = 'Action{0}SuccessCount'\n failure_template = 'Action{0}FailureCount'\n successes = [success_template.format(i) for i in range(1, num_arms + 1)]\n failures = [failure_template.format(i) for i in range(1, num_arms + 1)]\n\n arm_mean_columns = ['mean_arm_' + str(i) for i in range(1, num_arms + 1)]\n arm_var_columns = ['var_arm_' + str(i) for i in range(1, num_arms + 1)]\n \n all_dfs = []\n #print(\"ARMSTATS---\")\n #print(\"num_steps\", num_steps)\n for i in range(num_sims):\n output_file = pd.read_csv(get_output_filename(outfile_directory, num_steps, i),skiprows=1)\n # print(\"output_file\", output_file)\n mean_dfs = [output_file[successHeader] / (output_file[successHeader] + output_file[failureHeader]) for successHeader, failureHeader in zip(successes, failures)]\n all_means_df = pd.DataFrame({arm_mean_column: col for arm_mean_column, col in zip(arm_mean_columns,mean_dfs)})\n var_dfs = [scipy.stats.beta.var(output_file[successHeader], output_file[failureHeader]) for successHeader, failureHeader in zip(successes, failures)]\n all_vars_df = pd.DataFrame({arm_var_column: col for arm_var_column, col in zip(arm_var_columns,var_dfs)})\n means_and_var_df = pd.concat([all_means_df,all_vars_df], axis=1, join=\"inner\")\n means_and_var_df.insert(0,'num_steps',num_steps)\n means_and_var_df.insert(0,'sim',i)\n means_and_var_df.insert(0,'trial',range(0, all_vars_df.shape[0]))\n all_dfs.append(means_and_var_df)\n\n # Now do some 
aggregating\n #print(\"all_dfs len\", len(all_dfs))\n\n df = all_dfs[0].append(all_dfs[1:])\n return df\n\ndef make_stats_row_from_df(cur_data, include_power, effect_size = None, alpha = None):\n '''Calculates output statistics given the data frame cur_data. If include_power, includes the power calculation.\n efffect_size and alpha are only required/used if power is calculated\n '''\n sample_sizes = np.array([np.sum(cur_data[action_header] == i) for i in range(1,3)])\n successes = np.array([np.sum(cur_data[cur_data[action_header] == 1][obs_reward_header]), \n np.sum(cur_data[cur_data[action_header] == 2][obs_reward_header])])\n #calculate sample size and mean\n cur_row = {}\n \n sample_size_1 = sample_sizes[0]\n sample_size_2 = sample_sizes[1]\n cur_row['sample_size_1'] = sample_size_1\n cur_row['sample_size_2'] = sample_size_2\n\n mean_1 = np.mean(cur_data[cur_data[action_header] == 1][obs_reward_header])# JN mean for arm 1\n mean_2 = np.mean(cur_data[cur_data[action_header] == 2][obs_reward_header])#JN mean for arm 2\n\n cur_row['mean_1'] = mean_1\n cur_row['mean_2'] = mean_2\n #SE = sqrt[(P^hat_A*(1-P^hat_A)/N_A + (P^hat_B*(1-P^hat_B)/N_B]\n SE = np.sqrt(mean_1*(1 - mean_1)/sample_size_1 + mean_2*(1 - mean_2)/sample_size_2)\n wald_type_stat = (mean_1 - mean_2)/SE #(P^hat_A - P^hat_b)/SE\n #print('wald_type_stat:', wald_type_stat)\n wald_pval = (1 - scipy.stats.norm.cdf(np.abs(wald_type_stat)))*2 #Two sided, symetric, so compare to 0.05\n\n cur_row['wald_type_stat'] = wald_type_stat\n cur_row['wald_pval'] = wald_pval\n #print(\"wald_pval\", wald_pval)\n #calculate total reward\n cur_row['total_reward'] = np.sum(cur_data[obs_reward_header])\n \n #calculate power\n cur_row['ratio'] = sample_sizes[0] / sample_sizes[1]\n if include_power:\n cur_row['power'] = smp.GofChisquarePower().solve_power(effect_size, nobs = sum(sample_sizes), n_bins=(2-1)*(2-1) + 1, alpha = alpha)\n cur_row['actual_es'] = calculate_effect_size(sample_sizes, successes)\n \n\n #calculate chi squared contingency test\n table = sms.Table(np.stack((successes,sample_sizes - successes)).T)\n rslt = table.test_nominal_association()\n cur_row['stat'] = rslt.statistic\n cur_row['pvalue'] = rslt.pvalue\n cur_row['df'] = rslt.df\n # Added to match normal rewards\n cur_row['statUnequalVar'],cur_row['pvalueUnequalVar'], cur_row['dfUnequalVar'] = cur_row['stat'],cur_row['pvalue'],cur_row['df']\n return cur_row\n\ndef calculate_statistics_from_sims(outfile_directory, num_sims, step_sizes, effect_size, alpha = 0.05):\n '''\n - Output that we'll look at:\n -- Given the actual average ratios of conditions, what is our average power (calculated using formulas above)?\n -- Actually conducting a t-test on our observations, what are our average statistics and p-values? Calculate the rate of rejecting the null hypothesis for the simulations (we'd expect it to be .8, since that's what power means).\n -- How large is our actual effect size on average, given that we have unequal samples?\n '''\n # Go through each file to collect the vectors of rewards we observed from each arm [i.e. 
condition]\n rows =[]\n for num_steps in step_sizes: \n for i in range(num_sims):\n output_file = pd.read_csv( get_output_filename(outfile_directory, num_steps, i),skiprows=1)\n cur_row = make_stats_row_from_df(output_file, True, effect_size, alpha)\n cur_row['num_steps'] = num_steps\n cur_row['sim'] = i\n rows.append(cur_row)\n # Now do some aggregating\n df = pd.DataFrame(rows, columns=['num_steps', 'sim', 'sample_size_1', 'sample_size_2', 'mean_1','mean_2',\\\n 'total_reward', 'ratio', 'power', 'actual_es', 'stat', 'pvalue', \\\n 'df','statUnequalVar','pvalueUnequalVar', 'dfUnequalVar', 'wald_pval', 'wald_type_stat'])\n \n return df\n\n\n\ndef calculate_effect_size(ns, successes):\n '''\n Calculates the actual effect size given that we have ns[i] observations for each condition i,\n and successes[i] successes were observed in condition i.\n '''\n ns = np.array(ns)\n successes = np.array(successes)\n succ_prop = successes / sum(ns)\n fail_prop = (ns - successes) / sum(ns)\n prob_table_h1 = np.stack((succ_prop, fail_prop)).T # This is equivalent to P in ES.w2 in R\n pi = np.sum(prob_table_h1,axis=0)\n pj = np.sum(prob_table_h1,axis=1)\n prob_table_h0 = np.outer(pi, pj).T\n w = math.sqrt(sum(sum((prob_table_h1-prob_table_h0)**2/prob_table_h0)))\n return w\n \n \n\ndef get_rewards_filename(outfile_directory, num_steps, sim_num):\n '''\n Returns the name of the file that will have the rewards for sim_num that has num_steps steps.\n '''\n separator = '/'\n if outfile_directory.endswith('/'):\n separator = ''\n reward_data_file = outfile_directory + separator + 'tbb_rewards_{0}_{1}.csv'\n return reward_data_file.format(num_steps, sim_num)\n\ndef get_reordered_rewards_filename(outfile_directory, num_steps, sim_num):\n '''\n Returns the name of the file that will have the rewards for sim_num that has num_steps steps.\n '''\n separator = '/'\n if outfile_directory.endswith('/'):\n separator = ''\n reward_data_file = outfile_directory + separator +'/tbb_rewards_reordered_{0}_{1}.csv'\n return reward_data_file.format(num_steps, sim_num)\n\ndef get_output_filename(outfile_directory, num_steps, sim_num):\n '''\n Returns the name of the file that will have the actions taken for sim_num that has num_steps steps.\n '''\n separator = '/'\n if outfile_directory.endswith('/'):\n separator = ''\n results_data_file = outfile_directory + separator + 'tbb_actions_{0}_{1}.csv'\n return results_data_file.format(num_steps, sim_num)\n\ndef run_simulations(num_sims, prob_per_arm, step_sizes, outfile_directory, successPrior = 1, failurePrior = 1, softmax_beta = None, \\\n reordering_fn = None, forceActions = 0, batch_size = 1, burn_in_size = 1):\n '''\n Runs num_sims bandit simulations with several different sample sizes (those in the list step_sizes). 
\n Bandit uses the thompson_ng sampling policy.\n '''\n\n for i in range(num_sims):\n # num_steps_prev = 0\n for num_steps in step_sizes:\n if forceActions != 0:\n# print(\"Forcing actions:\", forceActions)\n forced = run_effect_size_simulations.make_forced_actions(len(prob_per_arm), num_steps, forceActions)\n else:\n forced = forced_actions()\n cur_reward_file = get_rewards_filename(outfile_directory, num_steps, i)\n generate_single_bandit.generate_file(np.array(prob_per_arm),\n num_steps, \n cur_reward_file)\n if softmax_beta != None:\n # reorder rewards\n reordered_reward_file = get_reordered_rewards_filename(outfile_directory, num_steps, i)\n reorder_samples_in_rewards.reorder_rewards_by_quartile(cur_reward_file, \n reordered_reward_file, \n reordering_fn, \n softmax_beta)\n else:\n reordered_reward_file = cur_reward_file\n cur_output_file = get_output_filename(outfile_directory, num_steps, i)\n models = [beta_bernoulli.BetaBern(success=successPrior, failure=failurePrior) for _ in range(len(prob_per_arm))]\n\n\n '''thompson_policy.calculate_thompson_single_bandit(reordered_reward_file, \n num_actions=len(prob_per_arm), \n dest= cur_output_file, \n models=models, \n action_mode=thompson_policy.ActionSelectionMode.prob_is_best, \n relearn=True,\n forced = forced,\n batch_size = batch_size, \n burn_in_size = burn_in_size)\n '''\n # num_steps_prev = num_steps\n thompson_policy.old_two_phase_random_thompson_policy(reordered_reward_file, \n num_actions=len(prob_per_arm), \n dest= cur_output_file, \n random_dur=0,\n models=models,\n random_start=0,\n action_mode=thompson_policy.ActionSelectionMode.prob_is_best, \n relearn=True,\n forced = forced,\n batch_size = batch_size, \n burn_in_size = burn_in_size)\n\n\ndef run_simulations_empirical_rewards(num_sims, reward_file, experiment_id, reward_header, is_cost, \n outfile_directory, successPrior = 1, failurePrior = 1, forceActions = 0,\n shuffle_data = False):\n '''\n Runs num_sims bandit simulations with several different sample sizes (those in the list step_sizes). 
\n Bandit uses the thompson_ng sampling policy.\n '''\n num_actions = 2\n max_steps = -1\n means = []\n variance = []\n for i in range(num_sims):\n arm_1_rewards, arm_2_rewards = get_assistments_rewards.read_assistments_rewards(reward_file, \n reward_header, \n experiment_id,\n is_cost)\n if shuffle_data:\n random.shuffle(arm_1_rewards)\n random.shuffle(arm_2_rewards)\n max_steps = len(arm_1_rewards) + len(arm_2_rewards)\n means = [np.mean(arm_1_rewards), np.mean(arm_2_rewards)]\n variance= [np.var(arm_1_rewards), np.var(arm_2_rewards)]\n if forceActions != 0:\n print(\"Forcing actions:\", forceActions)\n forced = run_effect_size_simulations.make_forced_actions(num_actions, len(arm_1_rewards) + len(arm_2_rewards), forceActions)\n else:\n forced = forced_actions()\n\n \n cur_output_file = get_output_filename(outfile_directory, len(arm_1_rewards) + len(arm_2_rewards), i)\n models = [beta_bernoulli.BetaBern(success=successPrior, failure=failurePrior) for _ in range(num_actions)]\n thompson_policy.calculate_thompson_single_bandit_empirical_params(arm_1_rewards,\n arm_2_rewards, \n num_actions=num_actions, \n dest= cur_output_file, \n models=models, \n action_mode=thompson_policy.ActionSelectionMode.prob_is_best, \n relearn=True,\n forced = forced)\n return max_steps, means, variance\n\n \ndef run_simulations_uniform_random(num_sims, prob_per_arm, step_sizes, outfile_directory, forceActions = 0):\n '''\n Runs num_sims bandit simulations with several different sample sizes (those in the list step_sizes). \n Bandit uses the thompson_ng sampling policy.\n '''\n\n for i in range(num_sims):\n for num_steps in step_sizes:\n if forceActions != 0:\n print(\"Forcing actions:\", forceActions)\n forced = run_effect_size_simulations.make_forced_actions(len(prob_per_arm), num_steps, forceActions)\n else:\n forced = forced_actions()\n cur_reward_file = get_rewards_filename(outfile_directory, num_steps, i)\n generate_single_bandit.generate_file(np.array(prob_per_arm),\n num_steps, \n cur_reward_file)\n# \n cur_output_file = get_output_filename(outfile_directory, num_steps, i)\n models = [beta_bernoulli.BetaBern(success=1, failure=1) for _ in range(len(prob_per_arm))]\n thompson_policy.calculate_thompson_single_bandit(cur_reward_file, \n num_actions=len(prob_per_arm), \n dest= cur_output_file, \n models=models, \n action_mode=thompson_policy.ActionSelectionMode.prob_is_best,\n epsilon = 1.0, \n relearn=True,\n forced = forced)\n \n\ndef get_prob_per_arm_from_effect_size(effect_size, center = 0.5):\n '''\n Calculates the probability of success on each arm that would be needed to get the given effect_size.\n Assumes the sample sizes will be equal, and that we have two arms, one with probability .5 + x, the other\n with probability .5 - x, for some x.\n '''\n x = effect_size / 2\n return [center + x, center - x]\n\n# def getParsedArguments():\n# parser = argparse.ArgumentParser()\n# parser.add_argument(\"--numSims\", type=int, default=1, help=\"total number of simulations of the bandit to run (each simulation involves multiple steps)\")\n# parser.add_argument(\"-o\", \"--outfile_dir\", type=str, \n# help=\"directory where all of the output will be written\")\n# parser.add_argument(\"-e\", \"--effectSize\", type=float,\n# help=\"set the arms to have this effect size (must be > 0)\")\n# parser.add_argument(\"--armProbs\", type=float,\n# help=\"set the arms to have this effect size (must be > 0)\")\n# parser.add_argument(\"-w\", \"--writeAllData\", type=bool, default=True,\n# help=\"if true, write all runs to the 
table file; otherwise, write only averages\")\n# parser.add_argument(\"--twoFactor\", type=bool, default=False,\n# help=\"if true, uses two-factor latent structure reward; otherwise, uses MVN\")\n# \n# args = parser.parse_args()\n# return args\n\ndef main():\n start_time = time.time()\n recalculate_bandits = True\n \n #batch_size = 1.0\n\n num_sims = int(sys.argv[2])\n outfile_directory = sys.argv[3]\n burn_in_size, batch_size = int(outfile_directory.split(\"=\")[-1].split('-')[0]), int(outfile_directory.split(\"=\")[-1].split('-')[1])\n print(\"burn_in_size, batch_size\", burn_in_size, batch_size)\n num_arms = 2\n # if sys.argv[1] has a comma, just use the result as probability per arm\n if \",\" in sys.argv[1]:\n if sys.argv[1].count(\",\") ==1:\n # specifying probability per arm but not effect size\n prob_per_arm = [float(armProb) for armProb in sys.argv[1].split(\",\")]\n effect_size = 0 # Note: This will be wrong if arm probs aren't equal!\n else:\n # specifying probability per arm as first two arguments, and then effect size\n numeric_arguments = [float(armProb) for armProb in sys.argv[1].split(\",\")];\n prob_per_arm = numeric_arguments[:2] # first two are arm probabilities\n effect_size = numeric_arguments[2] # final is effect size\n # We also need to specify n in this case for deciding on step sizes\n n = int(sys.argv[6])\n else:\n # We just need effect size for this calculation\n effect_size = float(sys.argv[1].split(\"-\")[0])\n center = float(sys.argv[1].split(\"-\")[1])\n prob_per_arm = get_prob_per_arm_from_effect_size(effect_size, center)\n # Assumes we have two arms\n nobs_total = smp.GofChisquarePower().solve_power(effect_size, n_bins=(2-1)*(2-1) + 1, alpha = DESIRED_ALPHA, power = DESIRED_POWER)\n# print(\"Calculated nobs for effect size:\", nobs_total)\n n = math.ceil(nobs_total)\n print(\"center\", center)\n #step_sizes = [math.ceil(n/2), n, 2*n] # These differ from the version for normal because in normal, n represented size for one cond rather than overall size\n step_sizes = [math.ceil(n/2), n, 2*n, 4*n] # These differ from the version for normal because in normal, n represented size for one cond rather than overall size\n #\n step_sizes = [n]\n\n print(\"prob_per_arm\", prob_per_arm)\n if len(sys.argv) > 7 and sys.argv[7].startswith(\"forceActions\"):\n run_effect_size_simulations.FORCE_ACTIONS = True\n num_to_force = float(sys.argv[7].split(\",\")[1])\n else:\n num_to_force = 0\n \n bandit_type = \"Thompson\"\n bandit_type_prefix = 'BB'\n if len(sys.argv) > 4:\n bandit_type = sys.argv[4]\n if bandit_type == \"uniform\":\n bandit_type_prefix = \"BU\"# Bernoulli rewards, uniform policy\n \n reorder_rewards = False\n softmax_beta = None\n reordering_fn = None\n if len(sys.argv) > 7 and not sys.argv[7].startswith(\"forceActions\"):\n # softmax beta for how to reorder rewards\n reorder_rewards = True\n softmax_beta = float(sys.argv[7])\n reordering_fn = reorder_samples_in_rewards.order_by_named_column('Action1OracleActualReward')\n if len(sys.argv) > 8:\n reordering_fn_specifier = sys.argv[8]\n reordering_fn = reorder_samples_in_rewards.get_reordering_fn(reordering_fn_specifier)\n\n prior_params = None\n if recalculate_bandits:\n \n if bandit_type == \"uniform\":\n run_simulations_uniform_random(num_sims, prob_per_arm, step_sizes, outfile_directory, forceActions = num_to_force)\n else:\n if len(sys.argv) > 5:\n if sys.argv[5] == \"armsHigh\":\n # Arms should be higher than the prior\n priorProportionOnSuccess = min(prob_per_arm)*PRIOR_PROPORTION_DIFFERENCE\n elif 
sys.argv[5] == \"armsLow\":\n # Arms should be lower than the prior\n priorProportionOnSuccess = 1 - (1-max(prob_per_arm))*PRIOR_PROPORTION_DIFFERENCE\n else:\n # Prior should be uniform (in between arms)\n priorProportionOnSuccess = .5\n # Make sure the prior sums to 2, mirroring the successes/failures of uniform prior\n prior_params = [priorProportionOnSuccess*2, 2-priorProportionOnSuccess*2]\n print(\"Prior params: \", prior_params)\n \n run_simulations(num_sims, prob_per_arm, step_sizes, outfile_directory, \n prior_params[0], prior_params[1], \n softmax_beta = softmax_beta, reordering_fn = reordering_fn,\n forceActions = num_to_force, batch_size = batch_size, burn_in_size = burn_in_size)\n else:\n run_simulations(num_sims, prob_per_arm, step_sizes, outfile_directory, forceActions = num_to_force, batch_size = batch_size, \\\n burn_in_size = burn_in_size)\n \n outfile_prefix = outfile_directory + bandit_type_prefix + str(effect_size);\n if effect_size == 0:\n # Then include the n in the prefix\n outfile_prefix += \"N\" + str(n) \n\n df = calculate_statistics_from_sims(outfile_directory, num_sims, step_sizes, effect_size, DESIRED_ALPHA)\n df.to_pickle(outfile_prefix + 'Df.pkl')\n df_by_trial = calculate_by_trial_statistics_from_sims(outfile_directory, num_sims, step_sizes, effect_size, DESIRED_ALPHA)\n\n df_by_trial.to_pickle(outfile_prefix + 'DfByTrial.pkl')\n # Print various stats\n summary_text = effect_size_sim_output_viz.print_output_stats(df, prob_per_arm, False, prior_params = prior_params, reordering_info = softmax_beta)\n with open(outfile_prefix + 'SummaryText.txt', 'w', newline='') as outf:\n outf.write(summary_text)\n overall_stats_df = effect_size_sim_output_viz.make_overall_stats_df(df, prob_per_arm, False, effect_size)\n overall_stats_df.to_pickle(outfile_prefix + 'OverallStatsDf.pkl')\n \n # Make histogram\n hist_figure = effect_size_sim_output_viz.make_hist_of_trials(df)\n hist_figure.savefig(outfile_prefix + 'HistOfConditionProportions.pdf', bbox_inches='tight')\n \n # Make line plot\n test_stat_figure = effect_size_sim_output_viz.make_by_trial_graph_of_column(df_by_trial, 'stat')\n test_stat_figure.savefig(outfile_prefix + 'TestStatOverTime.pdf', bbox_inches='tight')\n \n pvalue_figure = effect_size_sim_output_viz.make_by_trial_graph_of_column(df_by_trial, 'pvalue')\n pvalue_figure.savefig(outfile_prefix + 'PValueOverTime.pdf', bbox_inches='tight')\n \n # Plot power\n power_figure = effect_size_sim_output_viz.plot_power_by_steps(df_by_trial, DESIRED_ALPHA, DESIRED_POWER)\n power_figure.savefig(outfile_prefix + 'PowerOverTime.pdf', bbox_inches='tight')\n \n #Plot reward\n reward_figure = effect_size_sim_output_viz.make_by_trial_graph_of_column(df_by_trial, 'total_reward')\n reward_figure = effect_size_sim_output_viz.add_expected_reward_to_figure(reward_figure, prob_per_arm, step_sizes)\n reward_figure.savefig(outfile_prefix + 'RewardOverTime.pdf', bbox_inches='tight')\n \n # Plot arm statistics\n arm_df_by_trial = create_arm_stats_by_step(outfile_directory, num_sims, step_sizes[-1], num_arms)\n arm_stats_figure = effect_size_sim_output_viz.make_by_trial_arm_statistics(arm_df_by_trial, num_arms)\n arm_stats_figure.savefig(outfile_prefix + 'ArmStats.pdf', bbox_inches='tight')\n end_time = time.time()\n print('Execution time = %.6f seconds' % (end_time-start_time))\n\n\ndef empirical_main():\n # Assumes sys.argv[1] == 'empirical'\n recalculate_bandits = True\n num_arms = 2\n\n num_sims = int(sys.argv[2])\n \n reward_file = sys.argv[3]\n experiment_id = sys.argv[4]\n 
reward_header = sys.argv[5]\n \n if sys.argv[6] == \"use_cost\":\n is_cost = True\n else:\n is_cost = False\n \n outfile_directory = sys.argv[7]\n \n priorProportionOnSuccess = float(sys.argv[8])\n \n forceActions = 0\n \n shuffle_data = False\n if len(sys.argv) > 9:\n shuffle_data = sys.argv[9] == 'True'\n \n bandit_type = \"Thompson\"\n bandit_type_prefix = 'BB'\n\n prior_params = None\n if recalculate_bandits:\n # Make sure the prior sums to 2, mirroring the successes/failures of uniform prior\n prior_params = [priorProportionOnSuccess*2, 2-priorProportionOnSuccess*2]\n print(\"Prior params: \", prior_params)\n \n max_steps, prob_per_arm, variance = run_simulations_empirical_rewards(num_sims, reward_file, experiment_id, reward_header, is_cost, \n outfile_directory, prior_params[0], prior_params[1], forceActions, shuffle_data)\n \n outfile_prefix = outfile_directory + bandit_type_prefix + experiment_id + reward_header\n effect_size = 0\n step_sizes = [max_steps]\n df = calculate_statistics_from_sims(outfile_directory, num_sims, step_sizes, effect_size, DESIRED_ALPHA)\n df.to_pickle(outfile_prefix + 'Df.pkl')\n df_by_trial = calculate_by_trial_statistics_from_sims(outfile_directory, num_sims, step_sizes, effect_size, DESIRED_ALPHA)\n\n df_by_trial.to_pickle(outfile_prefix + 'DfByTrial.pkl')\n # Print various stats\n summary_text = effect_size_sim_output_viz.print_output_stats(df, prob_per_arm, False, prior_params = prior_params, reordering_info = 0)\n with open(outfile_prefix + 'SummaryText.txt', 'w', newline='') as outf:\n outf.write(summary_text)\n overall_stats_df = effect_size_sim_output_viz.make_overall_stats_df(df, prob_per_arm, False, effect_size)\n overall_stats_df.to_pickle(outfile_prefix + 'OverallStatsDf.pkl')\n \n # Make histogram\n hist_figure = effect_size_sim_output_viz.make_hist_of_trials(df)\n hist_figure.savefig(outfile_prefix + 'HistOfConditionProportions.pdf', bbox_inches='tight')\n \n # Make line plot\n test_stat_figure = effect_size_sim_output_viz.make_by_trial_graph_of_column(df_by_trial, 'stat')\n test_stat_figure.savefig(outfile_prefix + 'TestStatOverTime.pdf', bbox_inches='tight')\n \n pvalue_figure = effect_size_sim_output_viz.make_by_trial_graph_of_column(df_by_trial, 'pvalue')\n pvalue_figure.savefig(outfile_prefix + 'PValueOverTime.pdf', bbox_inches='tight')\n \n # Plot power\n power_figure = effect_size_sim_output_viz.plot_power_by_steps(df_by_trial, DESIRED_ALPHA, DESIRED_POWER)\n power_figure.savefig(outfile_prefix + 'PowerOverTime.pdf', bbox_inches='tight')\n \n #Plot reward\n reward_figure = effect_size_sim_output_viz.make_by_trial_graph_of_column(df_by_trial, 'total_reward')\n reward_figure = effect_size_sim_output_viz.add_expected_reward_to_figure(reward_figure, prob_per_arm, step_sizes)\n reward_figure.savefig(outfile_prefix + 'RewardOverTime.pdf', bbox_inches='tight')\n \n # Plot arm statistics\n arm_df_by_trial = create_arm_stats_by_step(outfile_directory, num_sims, step_sizes[-1], num_arms)\n arm_stats_figure = effect_size_sim_output_viz.make_by_trial_arm_statistics(arm_df_by_trial, num_arms)\n arm_stats_figure.savefig(outfile_prefix + 'ArmStats.pdf', bbox_inches='tight')\n \nif __name__==\"__main__\":\n if sys.argv[1] == 'empirical':\n empirical_main()\n else:\n main()\n"
] |
[
[
"numpy.sqrt",
"matplotlib.use",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.subplots",
"pandas.DataFrame",
"scipy.stats.t.ppf",
"numpy.mean",
"matplotlib.pyplot.close",
"numpy.var",
"numpy.random.uniform",
"numpy.array",
"numpy.sum"
],
[
"pandas.concat",
"numpy.sqrt",
"numpy.abs",
"pandas.DataFrame",
"numpy.stack",
"numpy.mean",
"numpy.var",
"numpy.outer",
"numpy.array",
"numpy.sum"
]
] |
DemoAuguste/ZAQ-code
|
[
"9986a2d217ab5cb284e08c062f8726cabacb311e"
] |
[
"network/resnet.py"
] |
[
"''' ResNet in PyTorch. '''\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, in_planes, planes, stride=1):\n super(BasicBlock, self).__init__()\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU()\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion*planes:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(self.expansion*planes)\n )\n\n def forward(self, x):\n out = self.relu(self.bn1(self.conv1(x)))\n out = self.bn2(self.conv2(out))\n out += self.shortcut(x)\n out = nn.ReLU(out)\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, in_planes, planes, stride=1):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(self.expansion*planes)\n self.relu = nn.ReLU()\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion*planes:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(self.expansion*planes)\n )\n\n def forward(self, x):\n out = self.relu(self.bn1(self.conv1(x)))\n out = self.relu(self.bn2(self.conv2(out)))\n out = self.bn3(self.conv3(out))\n out += self.shortcut(x)\n out = self.relu(out)\n return out\n\n\nclass ResNet(nn.Module):\n def __init__(self, block, num_blocks, num_classes=10):\n super(ResNet, self).__init__()\n self.in_planes = 64\n\n self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)\n self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)\n self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)\n self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)\n self.linear = nn.Linear(512*block.expansion, num_classes)\n self.relu = nn.ReLU()\n\n def _make_layer(self, block, planes, num_blocks, stride):\n strides = [stride] + [1]*(num_blocks-1)\n layers = []\n for stride in strides:\n layers.append(block(self.in_planes, planes, stride))\n self.in_planes = planes * block.expansion\n return nn.Sequential(*layers)\n\n def forward(self, x, out_feat=False):\n out = self.relu(self.bn1(self.conv1(x)))\n b1 = self.layer1(out)\n b2 = self.layer2(b1)\n b3 = self.layer3(b2)\n b4 = self.layer4(b3)\n out = F.avg_pool2d(b4, 4)\n out = out.view(out.size(0), -1)\n out = self.linear(out)\n if out_feat:\n return (b1, b2, b3, b4), out\n return out\n\n\ndef ResNet18(num_classes=10):\n return ResNet(BasicBlock, [2,2,2,2], num_classes)\n\ndef ResNet34(num_classes=10):\n return ResNet(BasicBlock, [3,4,6,3], num_classes)\n\ndef ResNet50(num_classes=10):\n return ResNet(Bottleneck, [3,4,6,3], num_classes)\n\ndef ResNet101(num_classes=10):\n return ResNet(Bottleneck, [3,4,23,3], num_classes)\n\ndef ResNet152(num_classes=10):\n return 
ResNet(Bottleneck, [3,8,36,3], num_classes)\n\n"
] |
[
[
"torch.nn.Sequential",
"torch.nn.functional.avg_pool2d",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU"
]
] |
timtroendle/geographic-scale
|
[
"81ec940e10b8e692429797e6a066a177e1508a89"
] |
[
"src/analyse/composition_uncertainty.py"
] |
[
"from dataclasses import dataclass\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom seaborn import utils\nfrom seaborn.palettes import blend_palette\n\nGREEN = \"#679436\"\nRED = \"#A01914\"\nBLUE = \"#4F6DB8\"\nYELLOW = \"#FABC3C\"\nGREY = \"#C0C0C0\"\nCOLOR = BLUE\nMAX_VALUE = 2\nNUMBER_VARIABLES = 2\n\nGW_TO_TW = 1e-3\nMW_TO_TW = 1e-6\n\nCOLUMN_HEADER = [\n \"y-continental-scale-cost-eur\",\n \"y-national-scale-cost-eur\",\n \"y-cost-diff-eur\",\n \"y-cost-diff-relative\",\n \"y-supply-diff-relative\",\n \"y-wind-diff-relative\",\n \"y-balancing-diff-relative\",\n \"y-continental-scale-pv-gw\",\n \"y-national-scale-pv-gw\",\n \"y-continental-scale-wind-gw\",\n \"y-national-scale-wind-gw\",\n \"y-continental-scale-hydro-gw\",\n \"y-national-scale-hydro-gw\",\n \"y-continental-scale-biofuel-gw\",\n \"y-national-scale-biofuel-gw\",\n \"y-continental-scale-storage-gw\",\n \"y-national-scale-storage-gw\",\n \"y-continental-scale-storage-gwh\",\n \"y-national-scale-storage-gwh\",\n \"y-continental-scale-transmission-gwkm\",\n \"y-regional-scale-cost-eur\",\n \"y-regional-scale-pv-gw\",\n \"y-regional-scale-wind-gw\",\n \"y-regional-scale-hydro-gw\",\n \"y-regional-scale-biofuel-gw\",\n \"y-regional-scale-storage-gw\",\n \"y-regional-scale-storage-gwh\",\n \"y-regional-scale-transmission-gwkm\"\n]\n\n\n@dataclass\nclass PlotData:\n panel_id: str\n title: str\n x: pd.Series\n y: pd.Series\n xlabel: str\n ylabel: str\n xlim: tuple\n ylim: tuple\n\n\ndef plot_composition_variability(path_to_large_scales, path_to_small_scale, path_to_plot):\n plot_datas = read_plot_data(path_to_large_scales, path_to_small_scale)\n fig = plot_data(plot_datas)\n fig.savefig(path_to_plot, pil_kwargs={\"compression\": \"tiff_lzw\"})\n\n\ndef read_plot_data(path_to_large_scales, path_to_small_scale):\n y = pd.concat([\n pd.read_csv(path_to_large_scales, index_col=None, header=None),\n pd.read_csv(path_to_small_scale, index_col=None, header=None),\n\n ], axis=\"columns\") * GW_TO_TW\n y.columns = COLUMN_HEADER\n return [\n PlotData(\n panel_id=\"A\",\n title=\"Supply\",\n ylabel=\"National scale (TW)\",\n xlabel=\"Continental scale (TW)\",\n xlim=(0, MAX_VALUE),\n ylim=(0, MAX_VALUE),\n x=(y[\"y-continental-scale-wind-gw\"]\n + y[\"y-continental-scale-pv-gw\"]\n + y[\"y-continental-scale-hydro-gw\"]),\n y=(y[\"y-national-scale-wind-gw\"]\n + y[\"y-national-scale-pv-gw\"]\n + y[\"y-national-scale-hydro-gw\"]),\n ),\n PlotData(\n panel_id=\"B\",\n title=\"Balancing\",\n ylabel=\"National scale (TW)\",\n xlabel=\"Continental scale (TW)\",\n xlim=(0, MAX_VALUE),\n ylim=(0, MAX_VALUE),\n x=y[\"y-continental-scale-biofuel-gw\"] + y[\"y-continental-scale-storage-gw\"],\n y=y[\"y-national-scale-biofuel-gw\"] + y[\"y-national-scale-storage-gw\"],\n ),\n PlotData(\n panel_id=\"C\",\n title=\"Supply\",\n ylabel=\"Regional scale (TW)\",\n xlabel=\"Continental scale (TW)\",\n xlim=(0, MAX_VALUE),\n ylim=(0, MAX_VALUE),\n x=(y[\"y-continental-scale-wind-gw\"]\n + y[\"y-continental-scale-pv-gw\"]\n + y[\"y-continental-scale-hydro-gw\"]),\n y=(y[\"y-regional-scale-wind-gw\"]\n + y[\"y-regional-scale-pv-gw\"]\n + y[\"y-regional-scale-hydro-gw\"]),\n ),\n PlotData(\n panel_id=\"D\",\n title=\"Balancing\",\n ylabel=\"Regional scale (TW)\",\n xlabel=\"Continental scale (TW)\",\n xlim=(0, MAX_VALUE),\n ylim=(0, MAX_VALUE),\n x=y[\"y-continental-scale-biofuel-gw\"] + y[\"y-continental-scale-storage-gw\"],\n y=y[\"y-regional-scale-biofuel-gw\"] + y[\"y-regional-scale-storage-gw\"],\n 
)\n ]\n\n\ndef plot_data(plot_datas):\n fig = plt.figure(figsize=(4.41, 4))\n axes = fig.subplots(\n nrows=2,\n ncols=int(len(plot_datas) / 2),\n sharex=True,\n sharey=True\n )\n\n color_rgb = mpl.colors.colorConverter.to_rgb(COLOR)\n colors = [utils.set_hls_values(color_rgb, l=l) # noqa\n for l in np.linspace(1, 0, 12)]\n cmap = blend_palette(colors, as_cmap=True)\n\n for i, plot_data in enumerate(plot_datas):\n ax = axes[i // NUMBER_VARIABLES][i % NUMBER_VARIABLES]\n ax.hexbin(\n x=plot_data.x,\n y=plot_data.y,\n gridsize=int((plot_data.x.max() - plot_data.x.min()) * 20),\n cmap=cmap\n )\n ax.set_aspect('equal')\n ax.set_ylim(*plot_data.ylim)\n ax.set_xlim(*plot_data.xlim)\n ax.plot(plot_data.xlim, plot_data.ylim, \"--\", color=GREY)\n if i // NUMBER_VARIABLES == 1:\n ax.set_xlabel(plot_data.xlabel)\n else:\n for tick in ax.xaxis.get_major_ticks():\n tick.set_visible(False)\n ax.set_title(plot_data.title, loc=\"left\")\n if i % NUMBER_VARIABLES == 0:\n ax.set_ylabel(plot_data.ylabel)\n else:\n for tick in ax.yaxis.get_major_ticks():\n tick.set_visible(False)\n ax.set_title(plot_data.panel_id, loc=\"left\")\n plt.subplots_adjust(\n left=0.13,\n right=0.93,\n top=0.93\n )\n return fig\n\n\nif __name__ == \"__main__\":\n plot_composition_variability(\n path_to_large_scales=snakemake.input.large_scales,\n path_to_small_scale=snakemake.input.small_scale,\n path_to_plot=snakemake.output[0]\n )\n"
] |
[
[
"pandas.read_csv",
"numpy.linspace",
"matplotlib.colors.colorConverter.to_rgb",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.figure"
]
] |
lukasz-migas/pySPM
|
[
"3b5e050d776af03b451a1064e47ecd732861e0fe"
] |
[
"pySPM/PCA.py"
] |
[
"# -- coding: utf-8 --\n\n# Copyright 2018 Olivier Scholder <[email protected]>\n\n\"\"\"\nThis module performs the PCS with the help of the scikit library and gives the user various function for quick plotting.\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import scale\nfrom sklearn.decomposition import PCA as PCA1\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom scipy import stats\nimport matplotlib.pyplot as plt\nfrom pySPM.SPM import SPM_image\nimport matplotlib as mpl\nfrom matplotlib import cm\nfrom pySPM import collection\nimport re\n\nclass PCA:\n\n def __init__(self, data):\n self.data = data\n self.pca = None\n self.standX = None\n\n def scatter(self, **kargs):\n pd.tools.plotting.scatter_matrix(self.data, diagonal=\"kde\", **kargs)\n plt.tight_layout()\n\n def corr(self):\n corrmat = self.data.corr()\n return corrmat\n\n def corrShow(self):\n import seaborn as sns\n corrmat = self.corr()\n sns.heatmap(corrmat).xaxis.tick_top()\n\n def hinton(self, max_weight=None, ax=None, matrix = None, xlabel=None, ylabel=None):\n \"\"\"Draw Hinton diagram for visualizing a weight matrix.\"\"\"\n if matrix is None:\n matrix = self.corr()\n ax = ax if ax is not None else plt.gca()\n\n if not max_weight:\n max_weight = 2**np.ceil(np.log(np.abs(matrix).max())/np.log(2))\n\n ax.patch.set_facecolor('lightgray')\n ax.set_aspect('equal', 'box')\n ax.xaxis.set_major_locator(plt.NullLocator())\n ax.yaxis.set_major_locator(plt.NullLocator())\n\n for (x, y), w in np.ndenumerate(matrix):\n color = 'red' if w > 0 else 'blue'\n size = np.sqrt(np.abs(w))\n rect = plt.Rectangle([y - size / 2, x - size / 2], size, size,\n facecolor=color, edgecolor=color)\n ax.add_patch(rect)\n\n nxticks = matrix.shape[1]\n nyticks = matrix.shape[0]\n if xlabel is None:\n xlabel = []\n for x in list(matrix.columns):\n x = re.sub(r'\\^([0-9]+)',r'^{\\1}', x)\n x = re.sub(r'_([0-9]+)',r'_{\\1}', x)\n if x[-1] in ['+','-']:\n x = x[:-1]+'^'+x[-1]\n xlabel.append('$'+x+'$')\n if ylabel is None:\n ylabel = list(matrix.index)\n ax.xaxis.tick_top()\n ax.set_xticks(range(nxticks))\n ax.set_xticklabels(xlabel, rotation=90)\n ax.set_yticks(range(nyticks))\n ax.set_yticklabels(ylabel)\n ax.grid(False)\n\n ax.autoscale_view()\n ax.invert_yaxis()\n\n def standardized(self, meanCentering=True):\n self.standX = pd.DataFrame(\n scale(self.data, with_mean=meanCentering), index=self.data.index, columns=self.data.columns)\n return self.standX\n\n def runPCA(self, meanCentering=True):\n if self.standX is None:\n self.standardized(meanCentering=meanCentering)\n self.pca = PCA1().fit(self.standX)\n\n def pca_summary(self):\n if self.pca is None:\n self.runPCA()\n names = [\"PC\"+str(i)\n for i in range(1, len(self.pca.explained_variance_ratio_)+1)]\n a = list(np.std(self.pca.transform(self.standX), axis=0))\n b = list(self.pca.explained_variance_ratio_)\n c = [np.sum(self.pca.explained_variance_ratio_[:i])\n for i in range(1, len(self.pca.explained_variance_ratio_)+1)]\n columns = pd.MultiIndex.from_tuples([(\"sdev\", \"Standard deviation\"), (\n \"varprop\", \"Proportion of Variance\"), (\"cumprop\", \"Cumulative Proportion\")])\n Z = zip(a, b, c)\n summary = pd.DataFrame(list(Z), index=names, columns=columns)\n return summary\n\n def screeplot(self, ax=None, num=None):\n if self.pca is None:\n self.runPCA()\n ax = ax if ax is not None else plt.gca()\n \n y = np.std(self.pca.transform(self.standX), axis=0)**2\n if num is None:\n num = len(y)\n x = np.arange(len(y)) + 1\n ax.grid(True)\n 
ax.plot(x[:num], y[:num], \"o-\")\n ax.set_xticks(x[:num])\n ax.set_xticklabels([\"PC\"+str(i) for i in x[:num]], rotation=60)\n ax.set_ylabel(\"Variance\")\n\n def pc(self, id=0):\n # find the number of samples in the data set and the number of\n # variables\n if self.pca is None:\n self.runPCA()\n pc = np.matmul(self.standX.as_matrix(), self.pca.components_[id].T)\n return pc\n\n def loadings(self, id=None):\n if self.pca is None:\n self.runPCA()\n if id is not None:\n return pd.DataFrame(self.pca.components_[id, None], columns=self.data.columns)\n return pd.DataFrame(self.pca.components_, columns=self.data.columns, index=[\"PC{0}\".format(i+1) for i in range(len(self.pca.components_))])\n\n def getPCAtransf(self):\n if self.pca is None:\n self.runPCA()\n return self.pca.transform(self.standX)\n\n def showStand(self):\n return pd.DataFrame([self.standX.apply(np.mean), self.standX.apply(np.std)], index=['Mean', 'Std'])\n\n def pca_scatter(self, classifs=None, light=False):\n import seaborn as sns\n foo = self.getPCAtransf()\n if classifs is None:\n if light:\n plt.scatter(foo[:, 0], foo[:, 1])\n else:\n bar = pd.DataFrame(\n list(zip(foo[:, 0], foo[:, 1])), columns=[\"PC1\", \"PC2\"])\n sns.lmplot(\"PC1\", \"PC2\", bar, fit_reg=False)\n else:\n if light:\n plt.scatter(foo[:, 0], foo[:, 1], color=cm.Scalar)\n else:\n bar = pd.DataFrame(list(zip(foo[:, 0], foo[:, 1], classifs)), columns=[\n \"PC1\", \"PC2\", \"Class\"])\n sns.lmplot(\"PC1\", \"PC2\", bar, hue=\"Class\", fit_reg=False)\n\n\nclass ITA_PCA(PCA):\n def __init__(self, c, channels=None):\n \"\"\"Init a PCA class from a collection\"\"\"\n self.col = c\n if channels is None:\n channels = c.channels.keys()\n mul = self.col.get_multivariate(channels)\n PCA.__init__(self, mul)\n\n def showPCA(self, num=None, ax=None, **kargs):\n c = self.getPCAcol(num)\n c.show(ax=ax, cmap='hot', **kargs)\n\n def getPCAcol(self, num=None):\n if num is None:\n num = self.data.shape[1]\n assert num <= self.data.shape[1]\n PCA_col = collection.Collection(\n cls=self.col, name=self.col.name+\"[PCA]\")\n for i in range(num):\n PC = self.getPCA(i)\n PCA_col.add(PC, 'PC{0}'.format(i+1))\n return PCA_col\n\n def getPCA(self, id=0):\n s = list(self.col.channels.values())[0].pixels.shape\n PC = self.pc(id).reshape(s)\n return PC\n"
] |
[
[
"matplotlib.pyplot.Rectangle",
"matplotlib.pyplot.gca",
"numpy.log",
"matplotlib.pyplot.tight_layout",
"numpy.sum",
"numpy.abs",
"matplotlib.pyplot.scatter",
"pandas.MultiIndex.from_tuples",
"pandas.DataFrame",
"pandas.tools.plotting.scatter_matrix",
"numpy.ndenumerate",
"sklearn.preprocessing.scale",
"sklearn.decomposition.PCA",
"matplotlib.pyplot.NullLocator"
]
] |
likui01/miliamp
|
[
"cdf1b53efaa2fcbd0635cbf839fd7f3eb1eb2946"
] |
[
"src/code/33pro testing.py"
] |
[
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Aug 25 13:07:56 2016\r\n\r\n@author: likui\r\n\"\"\"\r\nimport numpy as np\r\nfrom sklearn.metrics import roc_curve, auc\r\nfrom sklearn.metrics import precision_recall_curve\r\nfrom sklearn.metrics import average_precision_score\r\nfrom roc import*\r\ndef seg(filename):\r\n file = open(filename,\"r\")\r\n lines1 = file.readlines()\r\n file.close()\r\n amino1=[]\r\n target=[]\r\n for i in range(1,69,2):\r\n amino1.append(lines1[i].strip('\\n'))\r\n for i in range(34): \r\n target.append(np.zeros((1,len(amino1[i]))))\r\n \r\n target[0][0,16:32]=1\r\n target[0][0,87:99]=1 \r\n target[1][0,12:28]=1 \r\n target[2][0,7:20]=1\r\n target[2][0,13:21]=1\r\n target[2][0,19:29]=1\r\n target[2][0,20:30]=1\r\n target[2][0,29:37]=1\r\n target[3][0,0:93]=1\r\n target[4][0,59:70]=1\r\n target[5][0,10:25]=1\r\n target[5][0,24:35]=1\r\n target[5][0,29:40]=1\r\n target[5][0,36:42]=1\r\n target[6][0,20:31]=1\r\n target[6][0,32:41]=1\r\n target[6][0,58:71]=1\r\n target[6][0,82:89]=1\r\n target[6][0,90:96]=1\r\n target[7][0,10:20]=1\r\n target[7][0,100:110]=1\r\n target[7][0,115:126]=1\r\n target[7][0,145:152]=1\r\n target[8][0,14:20]=1\r\n target[9][0,80:125]=1\r\n target[10][0,0:35]=1\r\n target[10][0,35:67]=1\r\n target[11][0,97:103]=1\r\n target[12][0,0:29]=1\r\n target[12][0,100:118]=1\r\n target[13][0,172:230]=1\r\n target[14][0,217:289]=1\r\n target[15][0,12:18]=1\r\n target[16][0,10:17]=1\r\n target[17][0,491:509]=1\r\n target[18][0,537:545]=1\r\n target[19][0,8:34]=1\r\n target[20][0,4:14]=1\r\n target[20][0,24:34]=1\r\n target[20][0,55:61]=1\r\n target[21][0,83:105]=1\r\n target[21][0,104:125]=1\r\n target[21][0,147:153]=1\r\n target[21][0,153:163]=1\r\n target[21][0,155:171]=1\r\n target[21][0,179:196]=1\r\n target[21][0,208:231]=1\r\n target[22][0,31:41]=1\r\n target[22][0,41:50]=1\r\n target[23][0,65:72]=1\r\n target[24][0,111:157]=1\r\n target[25][0,6:21]=1\r\n target[25][0,19:34]=1\r\n target[25][0,42:57]=1\r\n target[26][0,23:32]=1\r\n target[27][0,0:142]=1\r\n target[28][0,0:0:12]=1\r\n target[29][0,5:12]=1\r\n target[30][0,34:44]=1\r\n target[30][0,48:59]=1\r\n target[30][0,59:68]=1\r\n target[30][0,68:82]=1\r\n target[30][0,85:95]=1\r\n target[31][0,588:600]=1\r\n target[32][0,9:20]=1\r\n target[32][0,104:115]=1\r\n target[33][0,0:88]=1\r\n \r\n labels=[] \r\n proteins=[]\r\n for i in range(len(amino1)):\r\n \r\n qw=[]\r\n w1=[]\r\n for jj in range(len(amino1[i])-6):\r\n \r\n \r\n aa=amino1[i][jj:jj+6] #+kmer(seq[i][yy:yy+7])\r\n qw.append(aa)\r\n bb=sum(target[i][0,jj:jj+6])\r\n w1.append(bb)\r\n indexes = [ii for ii,x in enumerate(w1) if x == 5] \r\n # w1=np.array(w1)\r\n for ik in indexes:\r\n for ij in range(-8,0): \r\n if (ik-ij)<0:\r\n pass\r\n elif(ik--ij)<0:\r\n pass\r\n else:\r\n w1[ik--ij]=1.0\r\n if(ik+ij)>=len(w1):\r\n pass\r\n elif(ik-ij)>=len(w1):\r\n pass\r\n else:\r\n w1[ik-ij]=1.0\r\n \r\n \r\n for iii in range(len(w1)):\r\n if w1[iii]>1.0:\r\n w1[iii]=1.0\r\n proteins.append(qw) \r\n labels.append(w1)\r\n# import pdb;pdb.set_trace() \r\n return labels, proteins\r\ndef read(filename):\r\n file=open(filename,'r')\r\n lines=file.readlines()\r\n file.close()\r\n s=[]\r\n \r\n for i in range(len(lines)):\r\n s.append(float(lines[i].strip('\\n')))\r\n \r\n \r\n return s \r\ndef compute(score,labels):\r\n sc=[]\r\n for i in range(33):\r\n aq=[]\r\n aw=[]\r\n ww=[]\r\n for jj in range(len(labels[i])):\r\n aa=score[i+jj]\r\n ww.append(aa)\r\n if labels[i][jj]==1:\r\n aq.append(jj)\r\n aw.append(aa)\r\n \r\n else:\r\n pass\r\n \r\n 
for ie in (range(len(aq))):\r\n ww[aq[ie]]=max(aw) \r\n sc.append(ww) \r\n \r\n \r\n sco=[]\r\n lab=[] \r\n for i in range(33):\r\n for k in range(len(sc[i])):\r\n sco.append(sc[i][k])\r\n lab.append(labels[i][k])\r\n \r\n return sco,lab \r\nif __name__ == '__main__':\r\n labels,pro=seg('S333.txt') \r\n aa=[]\r\n for i in [('linerasvm.txt',':','o'),('linearmil24.txt','--','2'),('ampmil.txt','-.','*'),('me.txt','-','>'),('aggre.txt',':','v'),('mett2.txt','--','4')]:\r\n s=read(i[0]) \r\n sco,lab=compute(s,labels)\r\n \r\n fpr, tpr, thresholds = roc_curve(lab,sco) # plotting ROC \r\n a=auc(fpr,tpr)\r\n aa.append(a)\r\n# print a\r\n plt.plot(fpr,tpr, marker=i[2],linestyle=i[1])\r\n# plt.figure() \r\n plt.xlabel('Fpr') \r\n plt.ylabel('Tpr')\r\n plt.xlim([-0.05,0.4]) \r\n plt.grid() \r\n plt.legend(['Linear SVM:'+str(round(aa[0],3)*100),'MIL:'+str(round(aa[1],3)*100),'MIL-Rank:'+str(round(aa[2],3)*100),'MetAmyl:'+str(round(aa[3],3)*100),'Aggrescan:'+str(round(aa[4],3)*100),'APPNN:'+str(round(aa[5],3)*100)],loc=4)\r\n \r\n #plt.legend([', auc='+str(round(a[0],3)),'APPNN, auc='+str(round(a[1],3)),'Aggrescan, auc='+str(round(a[2],3)),'MetAmyl, auc='+str(round(a[3],3)),'Linear MIL, train ds1+ds2, auc='+str(round(a[4],3)),'LLCMIL, train ds1+ds2, auc='+str(round(a[5],3)),'LLCMIL5cv, auc='+str(round(a[6],3))],loc=4)\r\n \r\n \r\n \r\n \r\n \r\n \r\n# \r\n## \r\n#with open('S33w.fasta','w')as f:\r\n# for i in range(len(pro)-1):\r\n# for j in range(len(pro[i])): \r\n# f.write('>tro|'+str(i)+\"|\"+str(labels[i][j]))\r\n# f.write('\\n')\r\n# f.write(str(pro[i][j]))\r\n# \r\n# f.write(\"\\n\")\r\n# \r\n#f.close() \r\n## \r\n## \r\n# \r\n# \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n "
] |
[
[
"sklearn.metrics.auc",
"sklearn.metrics.roc_curve"
]
] |
satw1knandala/Cirq
|
[
"ca307cd7ffdd2e8659c84c484788c8b5d15cc09d"
] |
[
"cirq/ops/common_channels.py"
] |
[
"# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Quantum channels that are commonly used in the literature.\"\"\"\n\nimport itertools\nfrom typing import (Any, Dict, Iterable, Optional, Sequence, Tuple, Union,\n TYPE_CHECKING)\n\nimport numpy as np\n\nfrom cirq import protocols, value\nfrom cirq.ops import (raw_types, common_gates, pauli_gates, gate_features,\n identity)\n\nif TYPE_CHECKING:\n import cirq\n\n\[email protected]_equality\nclass AsymmetricDepolarizingChannel(gate_features.SingleQubitGate):\n \"\"\"A channel that depolarizes asymmetrically along different directions.\"\"\"\n\n def __init__(self,\n p_x: Optional[float] = None,\n p_y: Optional[float] = None,\n p_z: Optional[float] = None,\n error_probabilities: Optional[Dict[str, float]] = None,\n tol: float = 1e-8) -> None:\n r\"\"\"The asymmetric depolarizing channel.\n\n This channel applies one of 4**n disjoint possibilities: nothing (the\n identity channel) or one of the 4**n - 1 pauli gates.\n\n This channel evolves a density matrix via\n\n $$\n \\sum_i p_i Pi \\rho Pi\n $$\n\n where i varies from 0 to 4**n-1 and Pi represents n-qubit Pauli operator\n (including identity). The input \\rho is the density matrix before the\n depolarization.\n\n Args:\n p_x: The probability that a Pauli X and no other gate occurs.\n p_y: The probability that a Pauli Y and no other gate occurs.\n p_z: The probability that a Pauli Z and no other gate occurs.\n error_probabilities: Dictionary of string (Pauli operator) to its\n probability. 
If the identity is missing from the list, it will\n be added so that the total probability mass is 1.\n tol: The tolerance used making sure the total probability mass is\n equal to 1.\n\n Examples of calls:\n * Single qubit: AsymmetricDepolarizingChannel(0.2, 0.1, 0.3)\n * Single qubit: AsymmetricDepolarizingChannel(p_z=0.3)\n * Two qubits: AsymmetricDepolarizingChannel(\n error_probabilities={'XX': 0.2})\n\n Raises:\n ValueError: if the args or the sum of args are not probabilities.\n \"\"\"\n if error_probabilities:\n num_qubits = len(list(error_probabilities)[0])\n for k in error_probabilities.keys():\n if not set(k).issubset({'I', 'X', 'Y', 'Z'}):\n raise ValueError(f\"{k} is not made solely of I, X, Y, Z.\")\n if len(k) != num_qubits:\n raise ValueError(f\"{k} must have {num_qubits} Pauli gates.\")\n for k, v in error_probabilities.items():\n value.validate_probability(v, f\"p({k})\")\n sum_probs = sum(error_probabilities.values())\n identity = 'I' * num_qubits\n if sum_probs < 1.0 - tol and identity not in error_probabilities:\n error_probabilities[identity] = 1.0 - sum_probs\n elif abs(sum_probs - 1.0) > tol:\n raise ValueError(\n f\"Probabilities do not add up to 1 but to {sum_probs}\")\n self._num_qubits = num_qubits\n self._error_probabilities = error_probabilities\n else:\n p_x = 0.0 if p_x is None else p_x\n p_y = 0.0 if p_y is None else p_y\n p_z = 0.0 if p_z is None else p_z\n\n p_x = value.validate_probability(p_x, 'p_x')\n p_y = value.validate_probability(p_y, 'p_y')\n p_z = value.validate_probability(p_z, 'p_z')\n p_i = 1 - value.validate_probability(p_x + p_y + p_z,\n 'p_x + p_y + p_z')\n\n self._num_qubits = 1\n self._error_probabilities = {'I': p_i, 'X': p_x, 'Y': p_y, 'Z': p_z}\n\n def _mixture_(self) -> Sequence[Tuple[float, np.ndarray]]:\n ps = []\n for pauli in self._error_probabilities:\n Pi = np.identity(1)\n for gate in pauli:\n if gate == 'I':\n Pi = np.kron(Pi, protocols.unitary(identity.I))\n elif gate == 'X':\n Pi = np.kron(Pi, protocols.unitary(pauli_gates.X))\n elif gate == 'Y':\n Pi = np.kron(Pi, protocols.unitary(pauli_gates.Y))\n elif gate == 'Z':\n Pi = np.kron(Pi, protocols.unitary(pauli_gates.Z))\n ps.append(Pi)\n return tuple(zip(self._error_probabilities.values(), ps))\n\n def _num_qubits_(self) -> int:\n return self._num_qubits\n\n def _has_mixture_(self) -> bool:\n return True\n\n def _value_equality_values_(self):\n return self._num_qubits, hash(\n tuple(sorted(self._error_probabilities.items())))\n\n def __repr__(self) -> str:\n return ('cirq.asymmetric_depolarize(' +\n f\"error_probabilities={self._error_probabilities})\")\n\n def __str__(self) -> str:\n return ('asymmetric_depolarize(' +\n f\"error_probabilities={self._error_probabilities})\")\n\n def _circuit_diagram_info_(self,\n args: 'protocols.CircuitDiagramInfoArgs') -> str:\n if self._num_qubits == 1:\n if args.precision is not None:\n return (f\"A({self.p_x:.{args.precision}g},\" +\n f\"{self.p_y:.{args.precision}g},\" +\n f\"{self.p_z:.{args.precision}g})\")\n return f\"A({self.p_x},{self.p_y},{self.p_z})\"\n if args.precision is not None:\n error_probabilities = [\n f\"{pauli}:{p:.{args.precision}g}\"\n for pauli, p in self._error_probabilities.items()\n ]\n else:\n error_probabilities = [\n f\"{pauli}:{p}\"\n for pauli, p in self._error_probabilities.items()\n ]\n return f\"A({', '.join(error_probabilities)})\"\n\n @property\n def p_x(self) -> float:\n \"\"\"The probability that a Pauli X and no other gate occurs.\"\"\"\n if self._num_qubits != 1:\n raise ValueError('num_qubits 
should be 1')\n return self._error_probabilities.get('X', 0.0)\n\n @property\n def p_y(self) -> float:\n \"\"\"The probability that a Pauli Y and no other gate occurs.\"\"\"\n if self._num_qubits != 1:\n raise ValueError('num_qubits should be 1')\n return self._error_probabilities.get('Y', 0.0)\n\n @property\n def p_z(self) -> float:\n \"\"\"The probability that a Pauli Z and no other gate occurs.\"\"\"\n if self._num_qubits != 1:\n raise ValueError('num_qubits should be 1')\n return self._error_probabilities.get('Z', 0.0)\n\n @property\n def num_qubits(self) -> int:\n \"\"\"The number of qubits\"\"\"\n return self._num_qubits\n\n @property\n def error_probabilities(self) -> Dict[str, float]:\n \"\"\"A dictionary from Pauli gates to probability\"\"\"\n return self._error_probabilities\n\n def _json_dict_(self) -> Dict[str, Any]:\n return protocols.obj_to_dict_helper(self, ['error_probabilities'])\n\n\ndef asymmetric_depolarize(\n p_x: Optional[float] = None,\n p_y: Optional[float] = None,\n p_z: Optional[float] = None,\n error_probabilities: Optional[Dict[str, float]] = None,\n tol: float = 1e-8) -> AsymmetricDepolarizingChannel:\n r\"\"\"Returns a AsymmetricDepolarizingChannel with given parameter.\n\n This channel applies one of 4**n disjoint possibilities: nothing (the\n identity channel) or one of the 4**n - 1 pauli gates.\n\n This channel evolves a density matrix via\n\n $ \\sum_i p_i Pi \\rho Pi $\n\n where i varies from 0 to 4**n-1 and Pi represents n-qubit Pauli operator\n (including identity). The input \\rho is the density matrix before the\n depolarization.\n\n Args:\n p_x: The probability that a Pauli X and no other gate occurs.\n p_y: The probability that a Pauli Y and no other gate occurs.\n p_z: The probability that a Pauli Z and no other gate occurs.\n error_probabilities: Dictionary of string (Pauli operator) to its\n probability. If the identity is missing from the list, it will\n be added so that the total probability mass is 1.\n tol: The tolerance used making sure the total probability mass is\n equal to 1.\n\n Examples of calls:\n * Single qubit: AsymmetricDepolarizingChannel(0.2, 0.1, 0.3)\n * Single qubit: AsymmetricDepolarizingChannel(p_z=0.3)\n * Two qubits: AsymmetricDepolarizingChannel(\n error_probabilities={'XX': 0.2})\n\n Raises:\n ValueError: if the args or the sum of the args are not probabilities.\n \"\"\"\n return AsymmetricDepolarizingChannel(p_x, p_y, p_z, error_probabilities,\n tol)\n\n\[email protected]_equality\nclass DepolarizingChannel(gate_features.SingleQubitGate):\n \"\"\"A channel that depolarizes a qubit.\"\"\"\n\n def __init__(self, p: float, n_qubits: int = 1) -> None:\n r\"\"\"The symmetric depolarizing channel.\n\n This channel applies one of 4**n disjoint possibilities: nothing (the\n identity channel) or one of the 4**n - 1 pauli gates. The disjoint\n probabilities of the non-identity Pauli gates are all the same,\n p / (4**n - 1), and the identity is done with probability 1 - p. The\n supplied probability must be a valid probability or else this\n constructor will raise a ValueError.\n\n\n This channel evolves a density matrix via\n\n $ \\rho \\rightarrow (1 - p) \\rho + 1 / (4**n - 1) \\sum _i P_i X P_i $\n\n where P_i are the $4^n - 1$ Pauli gates (excluding the identity).\n\n Args:\n p: The probability that one of the Pauli gates is applied. 
Each of\n the Pauli gates is applied independently with probability\n p / (4**n - 1).\n n_qubits: the number of qubits.\n\n Raises:\n ValueError: if p is not a valid probability.\n \"\"\"\n\n error_probabilities = {}\n\n p_depol = p / (4**n_qubits - 1)\n p_identity = 1.0 - p\n for pauli_tuple in itertools.product(['I', 'X', 'Y', 'Z'],\n repeat=n_qubits):\n pauli_string = ''.join(pauli_tuple)\n if pauli_string == 'I' * n_qubits:\n error_probabilities[pauli_string] = p_identity\n else:\n error_probabilities[pauli_string] = p_depol\n\n self._p = p\n self._n_qubits = n_qubits\n\n self._delegate = AsymmetricDepolarizingChannel(\n error_probabilities=error_probabilities)\n\n def _mixture_(self) -> Sequence[Tuple[float, np.ndarray]]:\n return self._delegate._mixture_()\n\n def _has_mixture_(self) -> bool:\n return True\n\n def _value_equality_values_(self):\n return self._p\n\n def __repr__(self) -> str:\n if self._n_qubits == 1:\n return f\"cirq.depolarize(p={self._p})\"\n return f\"cirq.depolarize(p={self._p},n_qubits={self._n_qubits})\"\n\n def __str__(self) -> str:\n if self._n_qubits == 1:\n return f\"depolarize(p={self._p})\"\n return f\"depolarize(p={self._p},n_qubits={self._n_qubits})\"\n\n def _act_on_(self, args: Any) -> bool:\n from cirq.sim import clifford\n if isinstance(args, clifford.ActOnCliffordTableauArgs):\n if args.prng.random() < self._p:\n gate = args.prng.choice(\n [pauli_gates.X, pauli_gates.Y, pauli_gates.Z])\n protocols.act_on(gate, args)\n return True\n return NotImplemented\n\n def _circuit_diagram_info_(self,\n args: 'protocols.CircuitDiagramInfoArgs') -> str:\n if args.precision is not None:\n return f\"D({self._p:.{args.precision}g})\"\n return f\"D({self._p})\"\n\n @property\n def p(self) -> float:\n \"\"\"The probability that one of the Pauli gates is applied.\n\n Each of the Pauli gates is applied independently with probability\n p / (4**n_qubits - 1).\n \"\"\"\n return self._p\n\n @property\n def n_qubits(self) -> int:\n \"\"\"The number of qubits\"\"\"\n return self._n_qubits\n\n def _json_dict_(self) -> Dict[str, Any]:\n if self._n_qubits == 1:\n return protocols.obj_to_dict_helper(self, ['p'])\n return protocols.obj_to_dict_helper(self, ['p', 'n_qubits'])\n\n\ndef depolarize(p: float, n_qubits: int = 1) -> DepolarizingChannel:\n r\"\"\"Returns a DepolarizingChannel with given probability of error.\n\n This channel applies one of 4**n disjoint possibilities: nothing (the\n identity channel) or one of the 4**n - 1 pauli gates. The disjoint\n probabilities of the non-identity Pauli gates are all the same,\n p / (4**n - 1), and the identity is done with probability 1 - p. The\n supplied probability must be a valid probability or else this constructor\n will raise a ValueError.\n\n This channel evolves a density matrix via\n\n $ \\rho \\rightarrow (1 - p) \\rho + 1 / (4**n - 1) \\sum _i P_i X P_i $\n\n where P_i are the $4^n - 1$ Pauli gates (excluding the identity).\n\n Args:\n p: The probability that one of the Pauli gates is applied. 
Each of\n the Pauli gates is applied independently with probability\n p / (4**n - 1).\n n_qubits: The number of qubits.\n\n Raises:\n ValueError: if p is not a valid probability.\n \"\"\"\n return DepolarizingChannel(p, n_qubits)\n\n\[email protected]_equality\nclass GeneralizedAmplitudeDampingChannel(gate_features.SingleQubitGate):\n \"\"\"Dampen qubit amplitudes through non ideal dissipation.\n\n This channel models the effect of energy dissipation into the environment\n as well as the environment depositing energy into the system.\n \"\"\"\n\n def __init__(self, p: float, gamma: float) -> None:\n r\"\"\"The generalized amplitude damping channel.\n\n Construct a channel to model energy dissipation into the environment\n as well as the environment depositing energy into the system. The\n probabilities with which the energy exchange occur are given by `gamma`,\n and the probability of the environment being not excited is given by\n `p`.\n\n The stationary state of this channel is the diagonal density matrix\n with probability `p` of being |0⟩ and probability `1-p` of being |1⟩.\n\n This channel evolves a density matrix via\n\n $$\n \\rho \\rightarrow M_0 \\rho M_0^\\dagger\n + M_1 \\rho M_1^\\dagger\n + M_2 \\rho M_2^\\dagger\n + M_3 \\rho M_3^\\dagger\n $$\n\n With\n\n $$\n \\begin{aligned}\n M_0 =& \\sqrt{p} \\begin{bmatrix}\n 1 & 0 \\\\\n 0 & \\sqrt{1 - \\gamma}\n \\end{bmatrix}\n \\\\\n M_1 =& \\sqrt{p} \\begin{bmatrix}\n 0 & \\sqrt{\\gamma} \\\\\n 0 & 0\n \\end{bmatrix}\n \\\\\n M_2 =& \\sqrt{1-p} \\begin{bmatrix}\n \\sqrt{1-\\gamma} & 0 \\\\\n 0 & 1\n \\end{bmatrix}\n \\\\\n M_3 =& \\sqrt{1-p} \\begin{bmatrix}\n 0 & 0 \\\\\n \\sqrt{\\gamma} & 0\n \\end{bmatrix}\n \\end{aligned}\n $$\n\n Args:\n gamma: the probability of the interaction being dissipative.\n p: the probability of the qubit and environment exchanging energy.\n\n Raises:\n ValueError: if gamma or p is not a valid probability.\n \"\"\"\n self._gamma = value.validate_probability(gamma, 'gamma')\n self._p = value.validate_probability(p, 'p')\n\n def _channel_(self) -> Iterable[np.ndarray]:\n p0 = np.sqrt(self._p)\n p1 = np.sqrt(1. - self._p)\n sqrt_g = np.sqrt(self._gamma)\n sqrt_g1 = np.sqrt(1. - self._gamma)\n return (\n p0 * np.array([[1., 0.], [0., sqrt_g1]]),\n p0 * np.array([[0., sqrt_g], [0., 0.]]),\n p1 * np.array([[sqrt_g1, 0.], [0., 1.]]),\n p1 * np.array([[0., 0.], [sqrt_g, 0.]]),\n )\n\n def _has_channel_(self) -> bool:\n return True\n\n def _value_equality_values_(self):\n return self._p, self._gamma\n\n def __repr__(self) -> str:\n return 'cirq.generalized_amplitude_damp(p={!r},gamma={!r})'.format(\n self._p, self._gamma)\n\n def __str__(self) -> str:\n return 'generalized_amplitude_damp(p={!r},gamma={!r})'.format(\n self._p, self._gamma)\n\n def _circuit_diagram_info_(self,\n args: 'protocols.CircuitDiagramInfoArgs') -> str:\n if args.precision is not None:\n f = '{:.' 
+ str(args.precision) + 'g}'\n return 'GAD({},{})'.format(f, f).format(self._p, self._gamma)\n return 'GAD({!r},{!r})'.format(self._p, self._gamma)\n\n @property\n def p(self) -> float:\n \"\"\"The probability of the qubit and environment exchanging energy.\"\"\"\n return self._p\n\n @property\n def gamma(self) -> float:\n \"\"\"The probability of the interaction being dissipative.\"\"\"\n return self._gamma\n\n def _json_dict_(self) -> Dict[str, Any]:\n return protocols.obj_to_dict_helper(self, ['p', 'gamma'])\n\n\ndef generalized_amplitude_damp(p: float, gamma: float\n ) -> GeneralizedAmplitudeDampingChannel:\n r\"\"\"\n Returns a GeneralizedAmplitudeDampingChannel with the given\n probabilities gamma and p.\n\n This channel evolves a density matrix via:\n\n $$\n \\rho \\rightarrow M_0 \\rho M_0^\\dagger + M_1 \\rho M_1^\\dagger\n + M_2 \\rho M_2^\\dagger + M_3 \\rho M_3^\\dagger\n $$\n\n With:\n\n $$\n \\begin{aligned}\n M_0 =& \\sqrt{p} \\begin{bmatrix}\n 1 & 0 \\\\\n 0 & \\sqrt{1 - \\gamma}\n \\end{bmatrix}\n \\\\\n M_1 =& \\sqrt{p} \\begin{bmatrix}\n 0 & \\sqrt{\\gamma} \\\\\n 0 & 0\n \\end{bmatrix}\n \\\\\n M_2 =& \\sqrt{1-p} \\begin{bmatrix}\n \\sqrt{1-\\gamma} & 0 \\\\\n 0 & 1\n \\end{bmatrix}\n \\\\\n M_3 =& \\sqrt{1-p} \\begin{bmatrix}\n 0 & 0 \\\\\n \\sqrt{\\gamma} & 0\n \\end{bmatrix}\n \\end{aligned}\n $$\n\n Args:\n gamma: the probability of the interaction being dissipative.\n p: the probability of the qubit and environment exchanging energy.\n\n Raises:\n ValueError: gamma or p is not a valid probability.\n \"\"\"\n return GeneralizedAmplitudeDampingChannel(p, gamma)\n\n\[email protected]_equality\nclass AmplitudeDampingChannel(gate_features.SingleQubitGate):\n \"\"\"Dampen qubit amplitudes through dissipation.\n\n This channel models the effect of energy dissipation to the\n surrounding environment.\n \"\"\"\n\n def __init__(self, gamma: float) -> None:\n r\"\"\"The amplitude damping channel.\n\n Construct a channel that dissipates energy. The probability of\n energy exchange occurring is given by gamma.\n\n This channel evolves a density matrix as follows:\n\n $$\n \\rho \\rightarrow M_0 \\rho M_0^\\dagger + M_1 \\rho M_1^\\dagger\n $$\n\n With:\n\n $$\n \\begin{aligned}\n M_0 =& \\begin{bmatrix}\n 1 & 0 \\\\\n 0 & \\sqrt{1 - \\gamma}\n \\end{bmatrix}\n \\\\\n M_1 =& \\begin{bmatrix}\n 0 & \\sqrt{\\gamma} \\\\\n 0 & 0\n \\end{bmatrix}\n \\end{aligned}\n $$\n\n Args:\n gamma: the probability of the interaction being dissipative.\n\n Raises:\n ValueError: is gamma is not a valid probability.\n \"\"\"\n self._gamma = value.validate_probability(gamma, 'gamma')\n self._delegate = GeneralizedAmplitudeDampingChannel(1.0, self._gamma)\n\n def _channel_(self) -> Iterable[np.ndarray]:\n # just return first two kraus ops, we don't care about\n # the last two.\n return list(self._delegate._channel_())[:2]\n\n def _has_channel_(self) -> bool:\n return True\n\n def _value_equality_values_(self):\n return self._gamma\n\n def __repr__(self) -> str:\n return 'cirq.amplitude_damp(gamma={!r})'.format(self._gamma)\n\n def __str__(self) -> str:\n return 'amplitude_damp(gamma={!r})'.format(self._gamma)\n\n def _circuit_diagram_info_(self,\n args: 'protocols.CircuitDiagramInfoArgs') -> str:\n if args.precision is not None:\n f = '{:.' 
+ str(args.precision) + 'g}'\n return 'AD({})'.format(f).format(self._gamma)\n return 'AD({!r})'.format(self._gamma)\n\n @property\n def gamma(self) -> float:\n \"\"\"The probability of the interaction being dissipative.\"\"\"\n return self._gamma\n\n def _json_dict_(self) -> Dict[str, Any]:\n return protocols.obj_to_dict_helper(self, ['gamma'])\n\n\ndef amplitude_damp(gamma: float) -> AmplitudeDampingChannel:\n r\"\"\"\n Returns an AmplitudeDampingChannel with the given probability gamma.\n\n This channel evolves a density matrix via:\n\n $$\n \\rho \\rightarrow M_0 \\rho M_0^\\dagger + M_1 \\rho M_1^\\dagger\n $$\n\n With:\n\n $$\n \\begin{aligned}\n M_0 =& \\begin{bmatrix}\n 1 & 0 \\\\\n 0 & \\sqrt{1 - \\gamma}\n \\end{bmatrix}\n \\\\\n M_1 =& \\begin{bmatrix}\n 0 & \\sqrt{\\gamma} \\\\\n 0 & 0\n \\end{bmatrix}\n \\end{aligned}\n $$\n\n Args:\n gamma: the probability of the interaction being dissipative.\n\n Raises:\n ValueError: if gamma is not a valid probability.\n \"\"\"\n return AmplitudeDampingChannel(gamma)\n\n\[email protected]_equality\nclass ResetChannel(gate_features.SingleQubitGate):\n \"\"\"Reset a qubit back to its |0⟩ state.\n\n The reset channel is equivalent to performing an unobserved measurement\n which then controls a bit flip onto the targeted qubit.\n \"\"\"\n\n def __init__(self, dimension: int = 2) -> None:\n r\"\"\"The reset channel.\n\n Construct a channel that resets the qubit.\n\n This channel evolves a density matrix as follows:\n\n $$\n \\rho \\rightarrow M_0 \\rho M_0^\\dagger + M_1 \\rho M_1^\\dagger\n $$\n\n With:\n\n $$\n \\begin{aligned}\n M_0 =& \\begin{bmatrix}\n 1 & 0 \\\\\n 0 & 0\n \\end{bmatrix}\n \\\\\n M_1 =& \\begin{bmatrix}\n 0 & 1 \\\\\n 0 & 0\n \\end{bmatrix}\n \\end{aligned}\n $$\n\n Args:\n dimension: Specify this argument when resetting a qudit. 
There will\n be `dimension` number of dimension by dimension matrices\n describing the channel, each with a 1 at a different position in\n the top row.\n \"\"\"\n self._dimension = dimension\n\n def _qid_shape_(self):\n return (self._dimension,)\n\n def _act_on_(self, args: Any):\n from cirq import sim\n\n if isinstance(args, sim.ActOnStateVectorArgs):\n # Do a silent measurement.\n measurements, _ = sim.measure_state_vector(\n args.target_tensor,\n args.axes,\n out=args.target_tensor,\n qid_shape=args.target_tensor.shape)\n result = measurements[0]\n\n # Use measurement result to zero the qid.\n if result:\n zero = args.subspace_index(0)\n other = args.subspace_index(result)\n args.target_tensor[zero] = args.target_tensor[other]\n args.target_tensor[other] = 0\n\n return True\n\n return NotImplemented\n\n def _channel_(self) -> Iterable[np.ndarray]:\n # The first axis is over the list of channel matrices\n channel = np.zeros((self._dimension,) * 3, dtype=np.complex64)\n channel[:, 0, :] = np.eye(self._dimension)\n return channel\n\n def _has_channel_(self) -> bool:\n return True\n\n def _value_equality_values_(self):\n return self._dimension\n\n def __repr__(self) -> str:\n if self._dimension == 2:\n return 'cirq.ResetChannel()'\n else:\n return 'cirq.ResetChannel(dimension={!r})'.format(self._dimension)\n\n def __str__(self) -> str:\n return 'reset'\n\n def _circuit_diagram_info_(self,\n args: 'protocols.CircuitDiagramInfoArgs') -> str:\n return 'R'\n\n @property\n def dimension(self) -> int:\n \"\"\"The dimension of the qudit being reset.\"\"\"\n return self._dimension\n\n def _json_dict_(self) -> Dict[str, Any]:\n return protocols.obj_to_dict_helper(self, ['dimension'])\n\n\ndef reset(qubit: 'cirq.Qid') -> raw_types.Operation:\n \"\"\"Returns a `ResetChannel` on the given qubit.\n \"\"\"\n return ResetChannel(qubit.dimension).on(qubit)\n\n\[email protected]_equality\nclass PhaseDampingChannel(gate_features.SingleQubitGate):\n \"\"\"Dampen qubit phase.\n\n This channel models phase damping which is the loss of quantum\n information without the loss of energy.\n \"\"\"\n\n def __init__(self, gamma: float) -> None:\n r\"\"\"The phase damping channel.\n\n Construct a channel that enacts a phase damping constant gamma.\n\n This channel evolves a density matrix via:\n\n $$\n \\rho \\rightarrow M_0 \\rho M_0^\\dagger + M_1 \\rho M_1^\\dagger\n $$\n\n With:\n\n $$\n \\begin{aligned}\n M_0 =& \\begin{bmatrix}\n 1 & 0 \\\\\n 0 & \\sqrt{1 - \\gamma}\n \\end{bmatrix}\n \\\\\n M_1 =& \\begin{bmatrix}\n 0 & 0 \\\\\n 0 & \\sqrt{\\gamma}\n \\end{bmatrix}\n \\end{aligned}\n $$\n\n Args:\n gamma: The damping constant.\n\n Raises:\n ValueError: if gamma is not a valid probability.\n \"\"\"\n self._gamma = value.validate_probability(gamma, 'gamma')\n\n def _channel_(self) -> Iterable[np.ndarray]:\n return (\n np.array([[1., 0.], [0., np.sqrt(1. - self._gamma)]]),\n np.array([[0., 0.], [0., np.sqrt(self._gamma)]]),\n )\n\n def _has_channel_(self) -> bool:\n return True\n\n def _value_equality_values_(self):\n return self._gamma\n\n def __repr__(self) -> str:\n return 'cirq.phase_damp(gamma={!r})'.format(self._gamma)\n\n def __str__(self) -> str:\n return 'phase_damp(gamma={!r})'.format(self._gamma)\n\n def _circuit_diagram_info_(self,\n args: 'protocols.CircuitDiagramInfoArgs') -> str:\n if args.precision is not None:\n f = '{:.' 
+ str(args.precision) + 'g}'\n return 'PD({})'.format(f).format(self._gamma)\n return 'PD({!r})'.format(self._gamma)\n\n @property\n def gamma(self) -> float:\n \"\"\"The damping constant.\"\"\"\n return self._gamma\n\n def _json_dict_(self) -> Dict[str, Any]:\n return protocols.obj_to_dict_helper(self, ['gamma'])\n\n\ndef phase_damp(gamma: float) -> PhaseDampingChannel:\n r\"\"\"\n Creates a PhaseDampingChannel with damping constant gamma.\n\n This channel evolves a density matrix via:\n\n $$\n \\rho \\rightarrow M_0 \\rho M_0^\\dagger + M_1 \\rho M_1^\\dagger\n $$\n\n With:\n\n $$\n \\begin{aligned}\n M_0 =& \\begin{bmatrix}\n 1 & 0 \\\\\n 0 & \\sqrt{1 - \\gamma}\n \\end{bmatrix}\n \\\\\n M_1 =& \\begin{bmatrix}\n 0 & 0 \\\\\n 0 & \\sqrt{\\gamma}\n \\end{bmatrix}\n \\end{aligned}\n $$\n\n Args:\n gamma: The damping constant.\n\n Raises:\n ValueError: is gamma is not a valid probability.\n \"\"\"\n return PhaseDampingChannel(gamma)\n\n\[email protected]_equality\nclass PhaseFlipChannel(gate_features.SingleQubitGate):\n \"\"\"Probabilistically flip the sign of the phase of a qubit.\"\"\"\n\n def __init__(self, p: float) -> None:\n r\"\"\"The phase flip channel.\n\n Construct a channel to flip the phase with probability p.\n\n This channel evolves a density matrix via:\n\n $$\n \\rho \\rightarrow M_0 \\rho M_0^\\dagger + M_1 \\rho M_1^\\dagger\n $$\n\n With:\n\n $$\n \\begin{aligned}\n M_0 =& \\sqrt{1 - p} \\begin{bmatrix}\n 1 & 0 \\\\\n 0 & 1\n \\end{bmatrix}\n \\\\\n M_1 =& \\sqrt{p} \\begin{bmatrix}\n 1 & 0 \\\\\n 0 & -1\n \\end{bmatrix}\n \\end{aligned}\n $$\n\n Args:\n p: the probability of a phase flip.\n\n Raises:\n ValueError: if p is not a valid probability.\n \"\"\"\n self._p = value.validate_probability(p, 'p')\n self._delegate = AsymmetricDepolarizingChannel(0., 0., p)\n\n def _mixture_(self) -> Sequence[Tuple[float, np.ndarray]]:\n mixture = self._delegate._mixture_()\n # just return identity and z term\n return (mixture[0], mixture[3])\n\n def _has_mixture_(self) -> bool:\n return True\n\n def _value_equality_values_(self):\n return self._p\n\n def __repr__(self) -> str:\n return 'cirq.phase_flip(p={!r})'.format(self._p)\n\n def __str__(self) -> str:\n return 'phase_flip(p={!r})'.format(self._p)\n\n def _circuit_diagram_info_(self,\n args: 'protocols.CircuitDiagramInfoArgs') -> str:\n if args.precision is not None:\n f = '{:.' 
+ str(args.precision) + 'g}'\n return 'PF({})'.format(f).format(self._p)\n return 'PF({!r})'.format(self._p)\n\n @property\n def p(self) -> float:\n \"\"\"The probability of a phase flip.\"\"\"\n return self._p\n\n def _json_dict_(self) -> Dict[str, Any]:\n return protocols.obj_to_dict_helper(self, ['p'])\n\n\ndef _phase_flip_Z() -> common_gates.ZPowGate:\n \"\"\"\n Returns a cirq.Z which corresponds to a guaranteed phase flip.\n \"\"\"\n return common_gates.ZPowGate()\n\n\ndef _phase_flip(p: float) -> PhaseFlipChannel:\n r\"\"\"\n Returns a PhaseFlipChannel that flips a qubit's phase with probability p.\n\n This channel evolves a density matrix via:\n\n $$\n \\rho \\rightarrow M_0 \\rho M_0^\\dagger + M_1 \\rho M_1^\\dagger\n $$\n\n With:\n\n $$\n \\begin{aligned}\n M_0 =& \\sqrt{p} \\begin{bmatrix}\n 1 & 0 \\\\\n 0 & 1\n \\end{bmatrix}\n \\\\\n M_1 =& \\sqrt{1-p} \\begin{bmatrix}\n 1 & 0 \\\\\n 0 & -1\n \\end{bmatrix}\n \\end{aligned}\n $$\n\n Args:\n p: the probability of a phase flip.\n\n Raises:\n ValueError: if p is not a valid probability.\n \"\"\"\n return PhaseFlipChannel(p)\n\n\ndef phase_flip(\n p: Optional[float] = None\n) -> Union[common_gates.ZPowGate, PhaseFlipChannel]:\n r\"\"\"\n Returns a PhaseFlipChannel that flips a qubit's phase with probability p\n if p is None, return a guaranteed phase flip in the form of a Z operation.\n\n This channel evolves a density matrix via:\n\n $$\n \\rho \\rightarrow M_0 \\rho M_0^\\dagger + M_1 \\rho M_1^\\dagger\n $$\n\n With:\n\n $$\n \\begin{aligned}\n M_0 =& \\sqrt{p} \\begin{bmatrix}\n 1 & 0 \\\\\n 0 & 1\n \\end{bmatrix}\n \\\\\n M_1 =& \\sqrt{1-p} \\begin{bmatrix}\n 1 & 0 \\\\\n 0 & -1\n \\end{bmatrix}\n \\end{aligned}\n $$\n\n Args:\n p: the probability of a phase flip.\n\n Raises:\n ValueError: if p is not a valid probability.\n \"\"\"\n if p is None:\n return _phase_flip_Z()\n\n return _phase_flip(p)\n\n\[email protected]_equality\nclass BitFlipChannel(gate_features.SingleQubitGate):\n r\"\"\"Probabilistically flip a qubit from 1 to 0 state or vice versa.\"\"\"\n\n def __init__(self, p: float) -> None:\n r\"\"\"The bit flip channel.\n\n Construct a channel that flips a qubit with probability p.\n\n This channel evolves a density matrix via:\n\n $$\n \\rho \\rightarrow M_0 \\rho M_0^\\dagger + M_1 \\rho M_1^\\dagger\n $$\n\n With:\n\n $$\n \\begin{aligned}\n M_0 =& \\sqrt{1 - p} \\begin{bmatrix}\n 1 & 0 \\\\\n 0 & 1\n \\end{bmatrix}\n \\\\\n M_1 =& \\sqrt{p} \\begin{bmatrix}\n 0 & 1 \\\\\n 1 & 0\n \\end{bmatrix}\n \\end{aligned}\n $$\n\n Args:\n p: the probability of a bit flip.\n\n Raises:\n ValueError: if p is not a valid probability.\n \"\"\"\n self._p = value.validate_probability(p, 'p')\n self._delegate = AsymmetricDepolarizingChannel(p, 0., 0.)\n\n def _mixture_(self) -> Sequence[Tuple[float, np.ndarray]]:\n mixture = self._delegate._mixture_()\n # just return identity and x term\n return (mixture[0], mixture[1])\n\n def _has_mixture_(self) -> bool:\n return True\n\n def _value_equality_values_(self):\n return self._p\n\n def __repr__(self) -> str:\n return 'cirq.bit_flip(p={!r})'.format(self._p)\n\n def __str__(self) -> str:\n return 'bit_flip(p={!r})'.format(self._p)\n\n def _circuit_diagram_info_(self,\n args: 'protocols.CircuitDiagramInfoArgs') -> str:\n if args.precision is not None:\n f = '{:.' 
+ str(args.precision) + 'g}'\n return 'BF({})'.format(f).format(self._p)\n return 'BF({!r})'.format(self._p)\n\n @property\n def p(self) -> float:\n \"\"\"The probability of a bit flip.\"\"\"\n return self._p\n\n def _json_dict_(self) -> Dict[str, Any]:\n return protocols.obj_to_dict_helper(self, ['p'])\n\n\ndef _bit_flip(p: float) -> BitFlipChannel:\n r\"\"\"\n Construct a BitFlipChannel that flips a qubit state\n with probability of a flip given by p.\n\n This channel evolves a density matrix via:\n\n $$\n \\rho \\rightarrow M_0 \\rho M_0^\\dagger + M_1 \\rho M_1^\\dagger\n $$\n\n With:\n\n $$\n \\begin{aligned}\n M_0 =& \\sqrt{p} \\begin{bmatrix}\n 1 & 0 \\\\\n 0 & 1\n \\end{bmatrix}\n \\\\\n M_1 =& \\sqrt{1-p} \\begin{bmatrix}\n 0 & 1 \\\\\n 1 & -0\n \\end{bmatrix}\n \\end{aligned}\n $$\n\n Args:\n p: the probability of a bit flip.\n\n Raises:\n ValueError: if p is not a valid probability.\n \"\"\"\n return BitFlipChannel(p)\n\n\ndef bit_flip(p: Optional[float] = None\n ) -> Union[common_gates.XPowGate, BitFlipChannel]:\n r\"\"\"\n Construct a BitFlipChannel that flips a qubit state\n with probability of a flip given by p. If p is None, return\n a guaranteed flip in the form of an X operation.\n\n This channel evolves a density matrix via\n\n $$\n \\rho \\rightarrow M_0 \\rho M_0^\\dagger + M_1 \\rho M_1^\\dagger\n $$\n\n With\n\n $$\n \\begin{aligned}\n M_0 =& \\sqrt{p} \\begin{bmatrix}\n 1 & 0 \\\\\n 0 & 1\n \\end{bmatrix}\n \\\\\n M_1 =& \\sqrt{1-p} \\begin{bmatrix}\n 0 & 1 \\\\\n 1 & -0\n \\end{bmatrix}\n \\end{aligned}\n $$\n\n Args:\n p: the probability of a bit flip.\n\n Raises:\n ValueError: if p is not a valid probability.\n \"\"\"\n if p is None:\n return pauli_gates.X\n\n return _bit_flip(p)\n"
] |
[
[
"numpy.sqrt",
"numpy.eye",
"numpy.identity",
"numpy.array",
"numpy.zeros"
]
] |
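All of the numpy calls listed for this record appear in the channels' Kraus-operator definitions; a small, Cirq-independent sketch checking the completeness relation sum_i M_i^dagger M_i = I for the amplitude-damping operators, with gamma chosen arbitrarily for illustration:

```python
import numpy as np

gamma = 0.3  # arbitrary damping probability, for illustration only

# Kraus operators of the amplitude damping channel, as in the docstrings above.
M0 = np.array([[1.0, 0.0], [0.0, np.sqrt(1.0 - gamma)]])
M1 = np.array([[0.0, np.sqrt(gamma)], [0.0, 0.0]])

# A valid channel satisfies sum_i M_i^dagger M_i == identity.
completeness = M0.conj().T @ M0 + M1.conj().T @ M1
assert np.allclose(completeness, np.eye(2))
print(completeness)
```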
neuronit/pfa
|
[
"6483f23de3ac43ae1121760ab44a2cae1f2cc901"
] |
[
"parameters_research/save_and_load.py"
] |
[
"# -*- coding: utf-8 -*-\n\n#############################################\n# dependencies #\n#############################################\n\nimport sys\nimport os.path\nimport numpy as np\n# others\nimport global_vars as g \n\n\n#############################################\n# code #\n#############################################\n\n\n\n#===========================================#\n# function save:\n# in: int i_epoch, nb_epochs & save_frequency: used to know if it is time to save\n# int i_simul: how many simulsations to save (in term of networks weights)\n# array save_header: what to save first\n# out: the save path if saved, else the boelean False \n#-------------------------------------------#\n# Saves the global_save into a file save_path.\n# The file is created if it doesn't exists.\n#-------------------------------------------#\n# What do we want about the weights ?\n# --> to update the networks' weights\n# So we arrange the global_load into a list of weights,\n# orderer by increasing number of simulation then\n# by increasing id_network, so at each new simulation\n# we just have to pop the begining of global_load into\n# the networks' weights\n#===========================================#\n\ndef save(save_path, i_epoch, nb_epochs, i_simul, save_frequency, save_header) :\n if save_path is None:\n return False\n g.global_save = []\n if (i_epoch%save_frequency) == 0 or (i_epoch == nb_epochs-1): # We want to save at the frequency save_frequency\n # and at the last epoch.\n for i_net in range(1, g.nb_networks-1): # Saving all networks.\n g.networks_weights[i_simul][i_net] = g.networks[i_net].weights # Updating the weights from the networks themselves for this simulation\n g.global_save += save_header # Erasing former save /!\\ need a copy\n for simul in range(i_simul+1): # we want to save the previous and the current simuls, so range(i_simul+1)\n for net in range(g.nb_networks):\n g.global_save.append( g.networks_weights[simul][net] ) # Update the current weights.\n\n user_name = save_header[6][0]\n game_name = save_header[6][1]\n np.savez(save_path, *g.global_save)\n return save_path\n else:\n return False\n\n\n \n#===========================================#\n# function load:\n# in: a path \".../file.npz\" to load\n# out: the params needed to run multi_networks,\n#-------------------------------------------#\n# Loads the file \"path\", extracts the informations\n# to run the function multi_networks() with the\n# correct parameters, and global_path contains the\n# weights of the networks for each simulation.\n# Raise an exception if the path is incorrect.\n#-------------------------------------------#\n# What do we want about the weights ?\n# --> to update the networks' weights\n# we arrange the global_load into a list of weights,\n# orderer by increasing number of simulation, then\n# by increasing id_network, so at each new simulation\n# we just have to pop the begining of global_load into\n# the networks' weights\n#===========================================#\n\ndef load(path):\n g.global_load = []\n if os.path.isfile(path) : # the file exists\n npzfile = np.load(path) # this doesn't load in the correct order, so we have to reorder it\n _load = ['arr_{}'.format(i) for i in range(len(npzfile.files))] # reordered\n g.global_load = [npzfile[i] for i in _load] # loaded in good order\n\n # extracting the header # When the header is extracted, the weights remain and will be load during the multi_networks() function \n # --------------------- # because we need the instantiated networks to 
update their weights.\n params = []\n for i in range(6): # concerns all the \"global_***_arrays\"\n params.append(g.global_load.pop(0))\n _load = g.global_load.pop(0) # It concatenates with an array which contains: user_name, g.nb_networks, game_name, game_observation_size, game_decision_size, max_score, nb_epochs, nb_simulations, save_frequency, rl_gamma and rl_epsilon.\n params.append(_load[0]) # user\n params.append(_load[1]) # game\n params.append(int(_load[2])) # observation_size\n params.append(int(_load[3])) # decision_size\n params.append(int(_load[4])) # max_score\n params.append(int(_load[5])) # nb_epochs\n params.append(int(_load[6])) # nb_simulations\n params.append(int(_load[7])) # save_freq\n params.append(float(_load[8])) # rl_gamma\n params.append(float(_load[9])) # rl_epsilon\n params += [True] # Last parameter: was_loaded = True.\n # --------------------- header extracted\n \n return params\n else:\n raise Exception('Uncorrect path to load')\n \n \n"
] |
[
[
"numpy.load",
"numpy.savez"
]
] |
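The load() function in this record relies on np.savez storing positional arguments under the keys 'arr_0', 'arr_1', ... and on npzfile.files not being guaranteed to come back in that order; a minimal round-trip sketch of that pattern, using throwaway arrays and a placeholder file name:

```python
import numpy as np

arrays = [np.arange(3), np.eye(2), np.zeros(4)]
np.savez("demo_save.npz", *arrays)  # positional args -> keys arr_0, arr_1, ...

npz = np.load("demo_save.npz")
# Rebuild the original order explicitly instead of trusting npz.files.
ordered = [npz["arr_{}".format(i)] for i in range(len(npz.files))]
for a, b in zip(arrays, ordered):
    assert np.array_equal(a, b)
```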
yogendrapal/askaquestion-rpi
|
[
"5b53cfca5ab2622349e48772758cd5793219834d"
] |
[
"facerec.py"
] |
[
"import face_recognition\nimport cv2\nfrom config import *\nimport json\nimport numpy as np\nimport time\nimport logging\n\n\n'''\nThis function will generate 2 face encodings for the person in\nfront of the camera and will return it. If no face is found for\n100 frames, then None is returned.\n'''\ndef generate_face_encodings(video_device=VIDEO_DEVICE):\n\tretry = 1\n\twhile retry < 10:\n\t\tvideo_capture = cv2.VideoCapture()\n\t\tif not video_capture.open(video_device):\n\t\t\ttime.sleep(2)\n\t\t\tretry += 1\n\t\telse:\n\t\t\tprint('Please wait for the system to register your face...')\n\t\t\tlogging.info('Please wait for the system to register your face...')\n\t\t\tbreak\n\tif(retry==10):\n\t\treturn\n\tvideo_capture.set(cv2.CAP_PROP_FRAME_WIDTH,160)\n\tvideo_capture.set(cv2.CAP_PROP_FRAME_HEIGHT,120)\n\n\tface_locations = []\n\tframe_count = 0\n\tskipframe = False\n\ttarget_skip_frame = 0\n\n\tface_encodings = []\n\n\twhile True:\n\t\t# Grab a single frame of video\n\t\tret, frame = video_capture.read()\n\t\tframe_count += 1\n\t\tif frame_count == target_skip_frame:\n\t\t\tskipframe = False\n\n\t\trgb_frame = frame[:, :, ::-1]\n\n\t\t# Find all the faces and face encodings in the current frame of video\n\t\tface_locations = face_recognition.face_locations(rgb_frame)\n\n\t\tif len(face_locations) == 1 and not skipframe:\n\t\t\tif len(face_encodings) < NUM_FACE_ENCODINGS_PER_RECORD:\n\t\t\t\tfenc = face_recognition.face_encodings(rgb_frame, face_locations)[0]\n\t\t\t\tface_encodings.append(fenc)\n\t\t\t\ttarget_skip_frame = frame_count + MIN_FRAME_SKIP_BW_TWO_ENCODINGS\n\t\t\t\tskipframe = True\n\t\t\telse:\n\t\t\t\tbreak\n\n\t\tif frame_count > MAX_FRAME_COUNT:\n\t\t\treturn None\n\t\t# Hit 'q' on the keyboard to quit!\n\t\tif cv2.waitKey(1) & 0xFF == ord('q'):\n\t\t\tbreak\n\tprint('frame-count:',frame_count)\n\tlogging.info('frame-count: '+str(frame_count))\n\t# Release handle to the webcam\n\tvideo_capture.release()\n\tcv2.destroyAllWindows()\n\tprint('[INFO]: Returning face encodings...\\n')\n\tlogging.info('[INFO]: Returning face encodings...\\n')\n\treturn face_encodings\n\n'''\nThis function will store the passed fencs into the npz file with the name fid.\nfencs contain the face encodings of the video with local id as fid\n'''\ndef store_face_encodings(fencs, fid):\n\ttry:\n\t\tnpz = np.load(FACE_ENCS_NPZ_PATH)\n\t\tfe_data = dict(npz)\n\texcept:\n\t\tfe_data = {}\n\tfor i in range(len(fencs)):\n\t\tfe_data[fid+('_%d'%(i))] = fencs[i]\n\tnp.savez(FACE_ENCS_NPZ_PATH,**fe_data)\n\tprint('[INFO]: Saved face encodings...\\n')\n\tlogging.info('[INFO]: Saved face encodings...\\n')\n\n\n'''\nThis function gets all stored face encodings from the npz file and\ncreates a list of encodings and fids to be used for comparision\n'''\ndef fetch_all_face_encodings():\n\ttry:\n\t\tnpz = np.load(FACE_ENCS_NPZ_PATH)\n\t\tfe_data = dict(npz)\n\texcept:\n\t\tfe_data = {}\n\tfencs = []\n\tfenc_ids = []\n\t# print()\n\t# print(fe_data)\n\t# print()\n\tfor fid in fe_data:\n\t\tfenc_ids.append(fid.split('_')[0])\n\t\tfencs.append(fe_data[fid])\n\treturn fencs,fenc_ids\n\tprint('[INFO]: Fetched face encodings...\\n')\n\tlogging.info('[INFO]: Fetched face encodings...\\n')\n\n'''\nThis function removes all the face encodings corresponding to the\npassed fid. 
And updates the npz file.\n'''\ndef remove_face_encodings(fid):\n\ttry:\n\t\tnpz = np.load(FACE_ENCS_NPZ_PATH)\n\t\tfe_data = dict(npz)\n\texcept:\n\t\treturn\n\tfor i in range(NUM_FACE_ENCODINGS_PER_RECORD):\n\t\tenc = fe_data.pop(fid+('_%d'%i),None)\n\t\tif not enc:\n\t\t\tbreak\n\tnp.savez(FACE_ENCS_NPZ_PATH,**fe_data)\n\n\n\n'''\nThis function runs the face recogntion and tries to the match\nthe face in front of the camera with the stored face encodings.\nif a match is found it returns the corresponding fids else it returns empty set\n'''\ndef fetch_fid(video_device=VIDEO_DEVICE):\n\tretry = 1\n\twhile retry < 10:\n\t\tvideo_capture = cv2.VideoCapture()\n\t\tif not video_capture.open(video_device):\n\t\t\ttime.sleep(2)\n\t\t\tretry += 1\n\t\telse:\n\t\t\tprint('Please look into the camera...')\n\t\t\tlogging.info('Please look into the camera...')\n\t\t\tbreak\n\tif(retry==10):\n\t\treturn\n\tvideo_capture.set(cv2.CAP_PROP_FRAME_WIDTH,160)\n\tvideo_capture.set(cv2.CAP_PROP_FRAME_HEIGHT,120)\n\tknown_fencs, known_fids = fetch_all_face_encodings()\n\n\tframe_count = 0\n\tfound_count = 0\n\n\twhile True:\n\t\t# Grab a single frame of video\n\t\tret, frame = video_capture.read()\n\t\tframe_count += 1\n\n\t\trgb_frame = frame[:, :, ::-1]\n\n\t\tface_locations = face_recognition.face_locations(rgb_frame)\n\t\tif len(face_locations) > 0:\n\t\t\tfound_count += 1\n\t\t\tprint('[INFO]: Detected %d face(s).\\n'%len(face_locations))\n\t\t\tlogging.info('[INFO]: Detected %d face(s).\\n'%len(face_locations))\n\t\tface_encodings = face_recognition.face_encodings(rgb_frame, face_locations)\n\n\t\tres_fid = set()\n\t\tfor enc in face_encodings:\n\t\t\tmatches = face_recognition.compare_faces(known_fencs, enc,tolerance=FACE_DISTANCE_TOLERANCE)\n\t\t\t# print(matches)\n\t\t\t# face_distances = face_recognition.face_distance(known_fencs, enc)\n\t\t\t# print(face_distances)\n\t\t\t# best_match_index = np.argmin(face_distances)\n\t\t\t# if matches[best_match_index]:\n\t\t\t# \tname = known_fids[best_match_index]\n\n\t\t\tif True in matches:\n\t\t\t\tfor m in range(len(matches)):\n\t\t\t\t\tif matches[m]:\n\t\t\t\t\t\tres_fid.add(known_fids[m])\n\t\t\t\tbreak\n\t\tif res_fid:\n\t\t\tprint('got matching face encodings')\n\t\t\tlogging.info('got matching face encodings')\n\t\t\tbreak\n\t\telif found_count == 3:\n\t\t\tbreak\n\n\t\tif frame_count > MAX_FRAME_COUNT:\n\t\t\tbreak\n\n\t\tif cv2.waitKey(1) & 0xFF == ord('q'):\n\t\t\tbreak\n\tvideo_capture.release()\n\tcv2.destroyAllWindows()\n\n\treturn res_fid"
] |
[
[
"numpy.load",
"numpy.savez"
]
] |
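store_face_encodings() and remove_face_encodings() in this record use the keyword form np.savez(path, **dict) so that every encoding keeps its own key; a short sketch of that add/remove round trip, with random 128-dimensional vectors standing in for real face encodings and a placeholder file name:

```python
import numpy as np

path = "face_encs_demo.npz"
fe_data = {"42_0": np.random.rand(128), "42_1": np.random.rand(128)}
np.savez(path, **fe_data)          # each dict key becomes a named array

fe_data = dict(np.load(path))      # back to a plain, mutable dict
fe_data.pop("42_1", None)          # drop one encoding, as remove_face_encodings does
np.savez(path, **fe_data)          # rewrite the file without it

assert set(np.load(path).files) == {"42_0"}
```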
etseidler/sagemaker-deployment
|
[
"84eb84d0793f7c14515ef55aadd4954912e3b0bf"
] |
[
"Project/train/train.py"
] |
[
"import argparse\nimport json\nimport os\nimport pickle\nimport sys\nimport sagemaker_containers\nimport pandas as pd\nimport torch\nimport torch.optim as optim\nimport torch.utils.data\n\nfrom model import LSTMClassifier\n\ndef model_fn(model_dir):\n \"\"\"Load the PyTorch model from the `model_dir` directory.\"\"\"\n print(\"Loading model.\")\n\n # First, load the parameters used to create the model.\n model_info = {}\n model_info_path = os.path.join(model_dir, 'model_info.pth')\n with open(model_info_path, 'rb') as f:\n model_info = torch.load(f)\n\n print(\"model_info: {}\".format(model_info))\n\n # Determine the device and construct the model.\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n model = LSTMClassifier(model_info['embedding_dim'], model_info['hidden_dim'], model_info['vocab_size'])\n\n # Load the stored model parameters.\n model_path = os.path.join(model_dir, 'model.pth')\n with open(model_path, 'rb') as f:\n model.load_state_dict(torch.load(f))\n\n # Load the saved word_dict.\n word_dict_path = os.path.join(model_dir, 'word_dict.pkl')\n with open(word_dict_path, 'rb') as f:\n model.word_dict = pickle.load(f)\n\n model.to(device).eval()\n\n print(\"Done loading model.\")\n return model\n\ndef _get_train_data_loader(batch_size, training_dir):\n print(\"Get train data loader.\")\n\n train_data = pd.read_csv(os.path.join(training_dir, \"train.csv\"), header=None, names=None)\n\n train_y = torch.from_numpy(train_data[[0]].values).float().squeeze()\n train_X = torch.from_numpy(train_data.drop([0], axis=1).values).long()\n\n train_ds = torch.utils.data.TensorDataset(train_X, train_y)\n\n return torch.utils.data.DataLoader(train_ds, batch_size=batch_size)\n\n\ndef train(model, train_loader, epochs, optimizer, loss_fn, device):\n \"\"\"\n This is the training method that is called by the PyTorch training script. The parameters\n passed are as follows:\n model - The PyTorch model that we wish to train.\n train_loader - The PyTorch DataLoader that should be used during training.\n epochs - The total number of epochs to train for.\n optimizer - The optimizer to use during training.\n loss_fn - The loss function used for training.\n device - Where the model and data should be loaded (gpu or cpu).\n \"\"\"\n \n for epoch in range(1, epochs + 1):\n model.train()\n total_loss = 0\n for batch in train_loader: \n batch_X, batch_y = batch\n \n batch_X = batch_X.to(device)\n batch_y = batch_y.to(device)\n \n # TODO: Complete this train method to train the model provided.\n optimizer.zero_grad()\n output = model(batch_X)\n loss = loss_fn(output, batch_y)\n loss.backward()\n optimizer.step()\n \n total_loss += loss.data.item()\n print(\"Epoch: {}, BCELoss: {}\".format(epoch, total_loss / len(train_loader)))\n\n\nif __name__ == '__main__':\n # All of the model parameters and training parameters are sent as arguments when the script\n # is executed. 
Here we set up an argument parser to easily access the parameters.\n\n parser = argparse.ArgumentParser()\n\n # Training Parameters\n parser.add_argument('--batch-size', type=int, default=512, metavar='N',\n help='input batch size for training (default: 512)')\n parser.add_argument('--epochs', type=int, default=10, metavar='N',\n help='number of epochs to train (default: 10)')\n parser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\n\n # Model Parameters\n parser.add_argument('--embedding_dim', type=int, default=32, metavar='N',\n help='size of the word embeddings (default: 32)')\n parser.add_argument('--hidden_dim', type=int, default=100, metavar='N',\n help='size of the hidden dimension (default: 100)')\n parser.add_argument('--vocab_size', type=int, default=5000, metavar='N',\n help='size of the vocabulary (default: 5000)')\n\n # SageMaker Parameters\n parser.add_argument('--hosts', type=list, default=json.loads(os.environ['SM_HOSTS']))\n parser.add_argument('--current-host', type=str, default=os.environ['SM_CURRENT_HOST'])\n parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])\n parser.add_argument('--data-dir', type=str, default=os.environ['SM_CHANNEL_TRAINING'])\n parser.add_argument('--num-gpus', type=int, default=os.environ['SM_NUM_GPUS'])\n\n args = parser.parse_args()\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n print(\"Using device {}.\".format(device))\n\n torch.manual_seed(args.seed)\n\n # Load the training data.\n train_loader = _get_train_data_loader(args.batch_size, args.data_dir)\n\n # Build the model.\n model = LSTMClassifier(args.embedding_dim, args.hidden_dim, args.vocab_size).to(device)\n\n with open(os.path.join(args.data_dir, \"word_dict.pkl\"), \"rb\") as f:\n model.word_dict = pickle.load(f)\n\n print(\"Model loaded with embedding_dim {}, hidden_dim {}, vocab_size {}.\".format(\n args.embedding_dim, args.hidden_dim, args.vocab_size\n ))\n\n # Train the model.\n optimizer = optim.Adam(model.parameters())\n loss_fn = torch.nn.BCELoss()\n\n train(model, train_loader, args.epochs, optimizer, loss_fn, device)\n\n # Save the parameters used to construct the model\n model_info_path = os.path.join(args.model_dir, 'model_info.pth')\n with open(model_info_path, 'wb') as f:\n model_info = {\n 'embedding_dim': args.embedding_dim,\n 'hidden_dim': args.hidden_dim,\n 'vocab_size': args.vocab_size,\n }\n torch.save(model_info, f)\n\n\t# Save the word_dict\n word_dict_path = os.path.join(args.model_dir, 'word_dict.pkl')\n with open(word_dict_path, 'wb') as f:\n pickle.dump(model.word_dict, f)\n\n\t# Save the model parameters\n model_path = os.path.join(args.model_dir, 'model.pth')\n with open(model_path, 'wb') as f:\n torch.save(model.cpu().state_dict(), f)\n"
] |
[
[
"torch.load",
"torch.manual_seed",
"torch.utils.data.TensorDataset",
"torch.utils.data.DataLoader",
"torch.from_numpy",
"torch.nn.BCELoss",
"torch.cuda.is_available",
"torch.save"
]
] |
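Most of the torch calls listed for this record come from _get_train_data_loader() and the loss setup in the training script; a compact sketch of that data-pipeline pattern on random tensors, with all shapes and sizes invented for illustration (torch.sigmoid over random values stands in for the LSTMClassifier forward pass):

```python
import torch
import torch.utils.data

torch.manual_seed(1)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Fake reviews: 20 samples of 50 token ids each, with binary sentiment labels.
train_X = torch.randint(0, 5000, (20, 50)).long()
train_y = torch.randint(0, 2, (20,)).float()

train_ds = torch.utils.data.TensorDataset(train_X, train_y)
train_loader = torch.utils.data.DataLoader(train_ds, batch_size=4)

loss_fn = torch.nn.BCELoss()  # expects probabilities in [0, 1]
for batch_X, batch_y in train_loader:
    fake_output = torch.sigmoid(torch.randn(batch_X.size(0)))  # stand-in for model(batch_X)
    loss = loss_fn(fake_output.to(device), batch_y.to(device))
```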
SizheWei/OpenCompoundDomainAdaptation-OCDA
|
[
"ee9b2186256b5dc231133283559c6c221e53549d"
] |
[
"source/algorithms/train_scheduled_mann_net.py"
] |
[
"import os\nfrom os.path import join\nfrom copy import deepcopy\n\n# Import from torch\nimport torch\nimport torch.optim as optim\n\n# Import from within Package \nfrom ..models.utils import get_model\nfrom ..data.utils import load_data_multi, get_dataset_multi\nfrom ..data.sampler import DomainScheduledSampler\n\nimport pdb\n\n\ndef train_epoch(loader_src, loader_tgt, net, domain_factor_net, opt_net, opt_dis,\n opt_selector_content, opt_selector_domain_factor, opt_classifier, epoch, the=0.6, domain_factor_cond=0):\n \n log_interval = 10 # specifies how often to display\n \n N = len(loader_tgt.dataset)\n joint_loader = zip(loader_src, loader_tgt)\n\n net.train()\n domain_factor_net.eval()\n \n last_update = -1\n\n for batch_idx, ((data_s, _), (data_t, _)) in enumerate(joint_loader):\n \n if len(data_s) == 1 or len(data_t) == 1: # BN protection\n continue\n\n # log basic mann train info\n info_str = \"[Train Schedule Mann] Epoch: {} [{}/{} ({:.2f}%)]\".format(epoch, batch_idx * len(data_t),\n N, 100 * batch_idx * len(data_t) / N)\n \n ########################\n # Setup data variables #\n ########################\n if torch.cuda.is_available():\n data_s = data_s.cuda()\n data_t = data_t.cuda()\n\n data_s.require_grad = False\n data_t.require_grad = False\n\n ##########################\n # Optimize discriminator #\n ##########################\n\n # extract and concat features\n score_s, x_s = net.src_net(data_s.clone())\n score_t, x_t = net.tgt_net(data_t.clone())\n\n ###########################\n # storing direct feature\n direct_feature = x_t.clone()\n\n # set up visual memory\n keys_memory = net.centroids.detach().clone()\n\n # computing memory feature by querying and associating visual memory\n values_memory = score_t.clone()\n values_memory = values_memory.softmax(dim=1)\n memory_feature = torch.matmul(values_memory, keys_memory)\n\n if domain_factor_cond == 0:\n # computing concept selector\n concept_selector = net.fc_selector(x_t.clone()).tanh()\n x_t = direct_feature + concept_selector * memory_feature\n elif domain_factor_cond == 1:\n with torch.no_grad():\n domain_factor_ftr = domain_factor_net(data_t).detach()\n domain_factor_selector = net.domain_factor_selector(x_t).tanh()\n x_t = direct_feature + domain_factor_selector * domain_factor_ftr\n elif domain_factor_cond == 2:\n # computing concept selector\n concept_selector = net.fc_selector(x_t.clone()).tanh()\n with torch.no_grad():\n domain_factor_ftr = domain_factor_net(data_t.clone()).detach()\n domain_factor_selector = net.domain_factor_selector(x_t.clone()).tanh()\n x_t = direct_feature + concept_selector * memory_feature + 0.01 * domain_factor_selector * domain_factor_ftr\n elif domain_factor_cond == 3:\n with torch.no_grad():\n domain_factor_ftr = domain_factor_net(data_t.clone()).detach()\n domain_indicator = net.domain_factor_selector(domain_factor_ftr.clone()).tanh()\n x_t = direct_feature + domain_indicator * memory_feature\n elif domain_factor_cond == 4:\n # computing concept selector\n concept_selector = net.fc_selector(x_t.clone()).tanh()\n with torch.no_grad():\n domain_factor_ftr = domain_factor_net(data_t.clone()).detach()\n domain_factor_selector = net.domain_factor_selector(domain_factor_ftr.clone()).tanh()\n x_t = direct_feature + domain_factor_selector * concept_selector * memory_feature\n else:\n raise Exception(\"No such domain_factor_cond: {}\".format(domain_factor_cond))\n\n # apply cosine norm classifier\n score_t = net.classifier(x_t.clone())\n ###########################\n\n f = torch.cat((score_s, 
score_t), 0)\n \n # predict with discriminator\n pred_concat = net.discriminator(f.clone())\n\n # prepare real and fake labels: source=1, target=0\n target_dom_s = torch.ones(len(data_s), requires_grad=False).long()\n target_dom_t = torch.zeros(len(data_t), requires_grad=False).long()\n label_concat = torch.cat((target_dom_s, target_dom_t), 0).cuda()\n\n # compute loss for disciminator\n loss_dis = net.gan_criterion(pred_concat.clone(), label_concat)\n\n # zero gradients for optimizer\n opt_dis.zero_grad()\n\n # loss backprop\n loss_dis.backward()\n\n # optimize discriminator\n opt_dis.step()\n\n # compute discriminator acc\n pred_dis = torch.squeeze(pred_concat.max(1)[1])\n acc = (pred_dis == label_concat).float().mean()\n \n # log discriminator update info\n info_str += \" acc: {:0.1f} D: {:.3f}\".format(acc.item()*100, loss_dis.item())\n\n ###########################\n # Optimize target network #\n ###########################\n\n # only update net if discriminator is strong\n if acc.item() > the:\n \n last_update = batch_idx\n \n # extract target features\n score_t, x_t = net.tgt_net(data_t.clone())\n\n ###########################\n # storing direct feature\n direct_feature = x_t.clone()\n\n # set up visual memory\n keys_memory = net.centroids.detach().clone()\n\n # computing memory feature by querying and associating visual memory\n values_memory = score_t.clone()\n values_memory = values_memory.softmax(dim=1)\n memory_feature = torch.matmul(values_memory, keys_memory)\n\n if domain_factor_cond == 0:\n # computing concept selector\n concept_selector = net.fc_selector(x_t.clone()).tanh()\n x_t = direct_feature + concept_selector * memory_feature\n elif domain_factor_cond == 1:\n with torch.no_grad():\n domain_factor_ftr = domain_factor_net(data_t).detach()\n domain_factor_selector = net.domain_factor_selector(x_t).tanh()\n x_t = direct_feature + domain_factor_selector * domain_factor_ftr\n elif domain_factor_cond == 2:\n # computing concept selector\n concept_selector = net.fc_selector(x_t.clone()).tanh()\n with torch.no_grad():\n domain_factor_ftr = domain_factor_net(data_t.clone()).detach()\n domain_factor_selector = net.domain_factor_selector(x_t.clone()).tanh()\n x_t = direct_feature + concept_selector * memory_feature + 0.01 * domain_factor_selector * domain_factor_ftr\n elif domain_factor_cond == 3:\n with torch.no_grad():\n domain_factor_ftr = domain_factor_net(data_t).detach()\n domain_indicator = net.domain_factor_selector(domain_factor_ftr).tanh()\n x_t = direct_feature + domain_indicator * memory_feature\n elif domain_factor_cond == 4:\n # computing concept selector\n concept_selector = net.fc_selector(x_t.clone()).tanh()\n with torch.no_grad():\n domain_factor_ftr = domain_factor_net(data_t.clone()).detach()\n domain_factor_selector = net.domain_factor_selector(domain_factor_ftr.clone()).tanh()\n x_t = direct_feature + domain_factor_selector * concept_selector * memory_feature\n else:\n raise Exception(\"No such domain_factor_cond: {}\".format(domain_factor_cond))\n\n # apply cosine norm classifier\n score_t = net.classifier(x_t.clone())\n ###########################\n\n ###########################\n # predict with discriinator\n ###########################\n pred_tgt = net.discriminator(score_t)\n \n # create fake label\n label_tgt = torch.ones(pred_tgt.size(0), requires_grad=False).long().cuda()\n \n # compute loss for target network\n loss_gan_t = net.gan_criterion(pred_tgt, label_tgt)\n\n # zero out optimizer gradients\n opt_dis.zero_grad()\n opt_net.zero_grad()\n\n 
opt_selector_content.zero_grad()\n opt_classifier.zero_grad()\n\n if opt_selector_domain_factor:\n opt_selector_domain_factor.zero_grad()\n\n # loss backprop\n loss_gan_t.backward()\n\n # optimize tgt network\n opt_net.step()\n opt_selector_content.step()\n opt_classifier.step()\n if opt_selector_domain_factor:\n opt_selector_domain_factor.step()\n\n # log net update info\n info_str += \" G: {:.3f}\".format(loss_gan_t.item()) \n\n ###########\n # Logging #\n ###########\n if batch_idx % log_interval == 0:\n print(info_str)\n\n return last_update\n\n\ndef train_scheduled_mann_multi(args):\n\n \"\"\"Main function for training mann.\"\"\"\n\n src = args.src\n tgt = args.tgt\n base_model = args.base_model\n domain_factor_model = args.domain_factor_model\n num_cls = args.num_cls\n tgt_list = args.tgt_list\n sort_idx = args.sort_idx\n power = args.power\n initial_ratio = args.initial_ratio\n schedule_strategy = args.schedule_strategy\n num_epoch = args.num_epoch_scheduled\n batch = args.batch\n datadir = args.datadir\n outdir = args.outdir_scheduled\n src_weights = args.src_net_file\n domain_factor_weights = args.domain_factor_net_file\n lr = args.scheduled_mann_lr\n betas = tuple(args.betas)\n weight_decay = args.weight_decay\n domain_factor_cond = args.domain_factor_cond\n centroids_path = args.centroids_src_file\n\n ###########################\n # Setup cuda and networks #\n ###########################\n\n # setup cuda\n if torch.cuda.is_available():\n kwargs = {'num_workers': 8, 'pin_memory': True}\n else:\n kwargs = {}\n\n # setup network \n net = get_model('MannNet', model=base_model, num_cls=num_cls,\n src_weights_init=src_weights,\n use_domain_factor_selector=(domain_factor_cond != 0),\n centroids_path=centroids_path)\n\n domain_factor_net = deepcopy(get_model('DomainFactorNet', num_cls=num_cls,\n base_model=base_model, domain_factor_model=domain_factor_model,\n weights_init=domain_factor_weights, eval=True).domain_factor_net)\n\n domain_factor_net.eval()\n\n # print network and arguments\n print(net)\n print('Training Scheduled Mann {} model for {}->{}'.format(base_model, src, tgt))\n\n #######################################\n # Setup data for training and testing #\n #######################################\n train_src_data = load_data_multi(src, 'train', batch=batch,\n rootdir=join(datadir, src), num_channels=net.num_channels,\n image_size=net.image_size, download=False, kwargs=kwargs)\n\n train_tgt_set = get_dataset_multi(tgt_list, 'train', rootdir=datadir, num_channels=net.num_channels,\n image_size=net.image_size, download=False)\n\n ######################\n # Optimization setup #\n ######################\n opt_net = optim.Adam(net.tgt_net.parameters(), lr=lr, \n weight_decay=weight_decay, betas=betas)\n opt_dis = optim.Adam(net.discriminator.parameters(), lr=lr, \n weight_decay=weight_decay, betas=betas)\n opt_selector_content = optim.Adam(net.fc_selector.parameters(), lr=lr*0.1, \n weight_decay=weight_decay, betas=betas)\n opt_classifier = optim.Adam(net.classifier.parameters(), lr=lr*0.1,\n weight_decay=weight_decay, betas=betas)\n if domain_factor_cond != 0:\n opt_selector_domain_factor = optim.Adam(net.domain_factor_selector.parameters(), lr=lr*0.1,\n weight_decay=weight_decay, betas=betas)\n else:\n opt_selector_domain_factor = None\n\n #########\n # Train #\n #########\n # scheduled_ratio = lambda ep: (1. - initial_ratio) / ((num_epoch) ** power) * (ep ** power) + initial_ratio\n # scheduled_ratio = lambda ep: (1. 
- initial_ratio) / ((num_epoch + 30) ** power) * (ep ** power) + initial_ratio\n # scheduled_ratio = lambda ep: (1. - initial_ratio) / ((num_epoch + 30) ** power) * (ep ** power) + initial_ratio\n # Best\n # scheduled_ratio = lambda ep: (1. - initial_ratio) / ((num_epoch - 30) ** power) * (ep ** power) + initial_ratio\n scheduled_ratio = lambda ep: (1. - initial_ratio) / ((num_epoch - 30) ** power) * (ep ** power) + initial_ratio\n\n # train_tgt_loader = load_data_multi(tgt_list, 'train', batch=batch,\n # rootdir=datadir, num_channels=net.num_channels,\n # image_size=net.image_size, download=True, kwargs=kwargs)\n\n for epoch in range(num_epoch):\n\n # if epoch % 5 == 0:\n # os.makedirs(outdir, exist_ok=True)\n # outfile = join(outdir, 'scheduled_{:s}_net_{:s}_{:s}_ep_{}.pth'.format(\n # base_model, src, tgt, epoch))\n # print('Saving to', outfile)\n # net.save(outfile)\n\n # Calculate current domain ratio\n ratio = scheduled_ratio(epoch)\n\n actual_lr = ratio * lr\n\n for param_group in opt_net.param_groups:\n param_group['lr'] = actual_lr\n for param_group in opt_dis.param_groups:\n param_group['lr'] = actual_lr\n for param_group in opt_selector_content.param_groups:\n param_group['lr'] = actual_lr * 0.1\n for param_group in opt_classifier.param_groups:\n param_group['lr'] = actual_lr * 0.1\n if domain_factor_cond != 0:\n for param_group in opt_net.param_groups:\n param_group['lr'] = actual_lr * 0.1\n\n if ratio < 1:\n # Use sampler for data loading\n print('Epoch: {}, using sampler'.format(epoch))\n sampler = DomainScheduledSampler(train_tgt_set, sort_idx, ratio,\n initial_ratio, schedule_strategy, seed=epoch)\n train_tgt_loader = torch.utils.data.DataLoader(train_tgt_set, batch_size=batch,\n shuffle=False, sampler=sampler, **kwargs)\n else:\n print('Epoch: {}, using default'.format(epoch))\n train_tgt_loader = torch.utils.data.DataLoader(train_tgt_set, batch_size=batch, shuffle=True, **kwargs)\n\n err = train_epoch(train_src_data, train_tgt_loader, net, domain_factor_net, opt_net, opt_dis, opt_selector_content,\n opt_selector_domain_factor, opt_classifier, epoch, domain_factor_cond=domain_factor_cond)\n # if err == -1:\n # print(\"No suitable discriminator\")\n # break\n \n ##############\n # Save Model #\n ##############\n os.makedirs(outdir, exist_ok=True)\n outfile = join(outdir, 'scheduled_{:s}_net_{:s}_{:s}.pth'.format(\n base_model, src, tgt))\n print('Saving to', outfile)\n net.save(outfile)\n\n"
] |
[
[
"torch.cat",
"torch.utils.data.DataLoader",
"torch.matmul",
"torch.no_grad",
"torch.cuda.is_available"
]
] |
nimish15shah/GRAPHOPT
|
[
"a4e70925bf59d0c802715b1c8696199d96d820ae"
] |
[
"src/sparse_linear_algebra/main.py"
] |
[
"\nfrom . import file_io\nfrom . import matrix_names_list\nimport networkx as nx\nfrom ..reporting_tools import reporting_tools\nfrom .. import common_classes\nfrom .. import ac_eval\nfrom .. import useful_methods\nfrom ..super_layer_generation.partition import status_node, layer_wise_partition_ASAP, CompileConfig\n\nimport time\nimport math\nimport scipy.io\nimport matplotlib.pylab as plt\nimport scipy as sp\nfrom scipy.sparse import linalg, csc_matrix, csr_matrix\nimport numpy as np\nimport itertools\nimport logging\n\nlogging.basicConfig(format='%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s', level=logging.INFO)\nlogger= logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\nclass Coarse_node_mappings():\n def __init__(self, coarse_n, adder_tree_set, mul_set, final_prod_node, b_key):\n self.coarse_n= coarse_n\n self.adder_tree_set= adder_tree_set\n self.mul_set= mul_set\n self.final_prod_node= final_prod_node\n self.b_key= b_key\n self.all_nodes= self.adder_tree_set | self.mul_set | set([self.final_prod_node])\n # self.full_row_graph = graph_nx.subgraph(self.all_nodes)\n\n self.map_n_to_semi_coarse= {}\n self.map_semi_coarse_to_tup= {}\n\n def create_coarse_nodes(self, graph, graph_nx, curr_partition):\n mul_nodes= curr_partition & self.mul_set\n adder_nodes= curr_partition & self.adder_tree_set\n nodes_mapped = curr_partition & self.all_nodes\n\n # print(len(self.all_nodes))\n curr_graph= graph_nx.subgraph(nodes_mapped)\n topological_list= list(nx.algorithms.dag.topological_sort(curr_graph)) \n topological_list_adders= [n for n in topological_list if graph[n].is_sum()]\n # these adders will be part of the mac operation\n # rest will be a separate adder-only tree operation\n adder_nodes_mac= set() \n adder_nodes_pure_tree= set()\n for n in topological_list_adders:\n predecessors = set(graph_nx.predecessors(n))\n # remove b_key from predecessor\n predecessors &= self.all_nodes\n\n # either predecessor is not in nodes_mapped\n # or one of its predecessor is itself in adder_nodes_pure_tree\n if len(predecessors - nodes_mapped) or len(predecessors & adder_nodes_pure_tree):\n adder_nodes_pure_tree.add(n)\n\n adder_nodes_mac = adder_nodes - adder_nodes_pure_tree\n \n n_coarse_nodes = 0\n operation_nodes_tup_ls= []\n if self.final_prod_node in curr_partition:\n n_coarse_nodes += 1\n operation_nodes_tup_ls.append(('final', set([self.final_prod_node])))\n\n if len(mul_nodes) !=0:\n n_coarse_nodes += 1\n operation_nodes_tup_ls.append(('mac', mul_nodes | adder_nodes_mac))\n\n if len(adder_nodes_pure_tree) !=0:\n n_coarse_nodes += 1\n operation_nodes_tup_ls.append(('add', adder_nodes_pure_tree))\n assert len(nodes_mapped) != len(self.all_nodes)\n\n assert len(operation_nodes_tup_ls) != 0\n return n_coarse_nodes, nodes_mapped, operation_nodes_tup_ls\n \nclass Matrix_statistics():\n def __init__(self, name):\n self.name= name\n self.U_or_L= None\n self.nrows= None\n self.ncols= None\n self.nnz= None\n\n self.critical_path_len_coarse= None\n self.critical_path_len_fine_tree= None\n self.critical_path_len_fine_chain= None\n self.critical_path_len_fine_hybrid= None\n\n # following should be lists,\n # where each element shows the number of nodes in that layer\n self.greedy_layer_wise_coarse= None\n self.greedy_layer_wise_fine_tree= None\n self.greedy_layer_wise_fine_chain= None\n self.greedy_layer_wise_fine_hybrid= None\n # as last as possible\n self.ALAP_layer_wise_coarse= None\n self.ALAP_layer_wise_fine_tree= None\n self.ALAP_layer_wise_fine_chain= None\n 
self.ALAP_layer_wise_fine_hybrid= None\n\n def get_str(self):\n stat_str = \"\"\n stat_str += self.name + ',' \n stat_str += self.U_or_L + ',' \n stat_str += f\"{self.nrows},{self.ncols},{self.nnz},\"\n stat_str += f\"critical_path_len_coarse, {self.critical_path_len_coarse},\"\n stat_str += f\"critical_path_len_fine_tree, {self.critical_path_len_fine_tree},\"\n stat_str += f\"critical_path_len_fine_chain, {self.critical_path_len_fine_chain},\"\n stat_str += f\"critical_path_len_fine_hybrid, {self.critical_path_len_fine_hybrid},\"\n\n return stat_str\n\ndef temp(global_var):\n name= matrix_names_list.matrix_names[0]\n\n mat= file_io.read_mat(global_var.SPARSE_MATRIX_MATLAB_PATH + name)\n reporting_tools.show_matrix(mat)\n lu = linalg.splu(mat)\n\n reporting_tools.show_matrix(lu.L.A)\n reporting_tools.show_matrix(lu.U.A)\n\n graph_nx= nx.convert_matrix.from_scipy_sparse_matrix(lu.U)\n graph_nx.remove_edges_from(nx.selfloop_edges(graph_nx))\n\n G= nx.DiGraph()\n G.add_nodes_from(graph_nx.nodes())\n for e in graph_nx.edges():\n e_new= sorted(list(e))\n G.add_edge(e_new[0], e_new[1])\n\n reporting_tools.plot_graph_nx_graphviz(G)\n reporting_tools.show_image()\n\nclass SparseTriangularSolve():\n def __init__(self, global_var, mtx_file_name, read_files= False, write_files= False, verify= False, graph_mode= 'FINE', output_mode= 'default', plot_matrix= False):\n assert output_mode in ['default', 'single_node']\n self.output_mode = output_mode\n\n self.global_var= global_var\n self.mtx_file_name= mtx_file_name\n\n try:\n self.A= file_io.read_mat(global_var.SPARSE_MATRIX_MATLAB_PATH + mtx_file_name)\n except:\n logger.warning(f\"not able to read file for matrix: {mtx_file_name}\")\n return\n\n print(\"matrix loaded\")\n self.nrows, self.ncols= self.A.shape\n self.nnz= self.A.count_nonzero()\n\n assert self.nrows == self.ncols\n\n # LU factors\n # Pr.T * L * U * Pc.T= A\n if not read_files:\n try:\n self.lu = linalg.splu(self.A)\n except:\n logger.warning(f\"LU decomposition failed for matrix {mtx_file_name}\")\n return\n\n self.L= self.lu.L # scipy csr matrix, self.L.A gives numpy array\n self.U= self.lu.U # scipy csr matrix\n # permutation matrices:\n self.Pr = csc_matrix((np.ones(self.nrows), (self.lu.perm_r, np.arange(self.nrows))))\n self.Pc = csc_matrix((np.ones(self.nrows), (np.arange(self.nrows), self.lu.perm_c)))\n\n if write_files:\n self.write_mm_file(self.L, self.mtx_file_name.replace('/', '_')+'_L')\n self.write_mm_file(self.U, self.mtx_file_name.replace('/', '_')+'_U')\n return\n\n else: # read the factors directly\n self.L= self.read_mm_file(self.mtx_file_name.replace('/', '_')+'_L')\n self.U= self.read_mm_file(self.mtx_file_name.replace('/', '_')+'_U')\n self.Pr= None\n self.Pc= None\n\n # reporting_tools.show_matrix(self.A.A)\n # reporting_tools.show_matrix(self.L.A)\n # reporting_tools.show_matrix(self.U.A)\n # graph_obj= self.create_digraph(self.L, 'L')\n # reporting_tools.plot_graph_nx_graphviz(graph_obj.graph_nx)\n # reporting_tools.show_image()\n # exit(1)\n\n # all nonzero on diagonal\n assert np.count_nonzero(self.L.diagonal()) == self.nrows\n assert np.count_nonzero(self.U.diagonal()) == self.nrows\n logger.info(f\"nrows: {self.nrows}, ncols:{self.ncols}, nnz:{self.nnz}\")\n\n # for matchin unsigned posit hw operaotr\n # self.unsigned_arithmetic(self.L)\n # self.unsigned_arithmetic(self.U)\n\n # self.statistics()\n # exit(1)\n\n if graph_mode == 'FINE':\n self.L_graph_obj, self.L_map_x_to_node, self.L_map_b_to_node, self.L_map_nz_idx_to_node, self.L_map_r_to_nodes_info= \\\n 
self.create_arithmetic_graph_directly('L', 'hybrid')\n logger.info(f\"Fine critical path length: {nx.algorithms.dag.dag_longest_path_length(self.L_graph_obj.graph_nx)}\")\n\n self.L_coarse_graph_obj= self.create_digraph(self.L, 'L')\n logger.info(f\"Coarse critical path length: {nx.algorithms.dag.dag_longest_path_length(self.L_coarse_graph_obj.graph_nx)}\")\n\n # self.U_coarse_graph_obj= self.create_digraph(self.U, 'U')\n # self.U_graph_obj, self.U_map_x_to_node, self.U_map_b_to_node, self.U_map_nz_idx_to_node, self.U_map_r_to_nodes_info= \\\n # self.create_arithmetic_graph_directly('U', 'chain')\n\n if verify:\n b= np.array([1.0 for _ in range(self.ncols)], dtype= 'double')\n self.lin_solve(b, verify= True)\n \n def statistics(self, write_files= False, file_path= None):\n if write_files:\n assert file_path != None\n\n stat_obj_L= Matrix_statistics(self.mtx_file_name)\n stat_obj_L.U_or_L= 'L'\n stat_obj_L.nrows, stat_obj_L.ncols= self.L.shape\n stat_obj_L.nnz= self.L.count_nonzero()\n\n stat_obj_U= Matrix_statistics(self.mtx_file_name)\n stat_obj_U.U_or_L= 'U'\n stat_obj_U.nrows, stat_obj_U.ncols= self.U.shape\n stat_obj_U.nnz= self.U.count_nonzero()\n\n self.L_coarse_graph_obj= self.create_digraph(self.L, 'L')\n self.U_coarse_graph_obj= self.create_digraph(self.U, 'U')\n stat_obj_L.critical_path_len_coarse= nx.algorithms.dag.dag_longest_path_length(self.L_coarse_graph_obj.graph_nx)\n stat_obj_U.critical_path_len_coarse= nx.algorithms.dag.dag_longest_path_length(self.U_coarse_graph_obj.graph_nx)\n\n # self.L_graph_obj, self.L_map_x_to_node, self.L_map_b_to_node, self.L_map_nz_idx_to_node, self.L_map_r_to_nodes_info= \\\n # self.create_arithmetic_graph_directly('L', 'tree')\n\n # self.U_graph_obj, self.U_map_x_to_node, self.U_map_b_to_node, self.U_map_nz_idx_to_node, self.U_map_r_to_nodes_info= \\\n # self.create_arithmetic_graph_directly('U', 'tree')\n # stat_obj_L.critical_path_len_fine_tree= nx.algorithms.dag.dag_longest_path_length(self.L_graph_obj.graph_nx)\n # stat_obj_U.critical_path_len_fine_tree= nx.algorithms.dag.dag_longest_path_length(self.U_graph_obj.graph_nx)\n \n # self.L_graph_obj, self.L_map_x_to_node, self.L_map_b_to_node, self.L_map_nz_idx_to_node, self.L_map_r_to_nodes_info= \\\n # self.create_arithmetic_graph_directly('L', 'chain')\n\n # self.U_graph_obj, self.U_map_x_to_node, self.U_map_b_to_node, self.U_map_nz_idx_to_node, self.U_map_r_to_nodes_info= \\\n # self.create_arithmetic_graph_directly('U', 'chain')\n # stat_obj_L.critical_path_len_fine_chain= nx.algorithms.dag.dag_longest_path_length(self.L_graph_obj.graph_nx)\n # stat_obj_U.critical_path_len_fine_chain= nx.algorithms.dag.dag_longest_path_length(self.U_graph_obj.graph_nx)\n\n # self.L_graph_obj, self.L_map_x_to_node, self.L_map_b_to_node, self.L_map_nz_idx_to_node, self.L_map_r_to_nodes_info= \\\n # self.create_arithmetic_graph_directly('L', 'hybrid')\n \n # b= np.array([1.0 for _ in range(self.ncols)], dtype= 'double')\n # self.tr_solve(b, 'L', verify= True)\n\n # self.U_graph_obj, self.U_map_x_to_node, self.U_map_b_to_node, self.U_map_nz_idx_to_node, self.U_map_r_to_nodes_info= \\\n # self.create_arithmetic_graph_directly('U', 'hybrid')\n # b= np.array([1.0 for _ in range(self.ncols)], dtype= 'double')\n # self.tr_solve(b, 'U', verify= True)\n \n stat_obj_L.critical_path_len_fine_hybrid= nx.algorithms.dag.dag_longest_path_length(self.L_graph_obj.graph_nx)\n # stat_obj_U.critical_path_len_fine_hybrid= nx.algorithms.dag.dag_longest_path_length(self.U_graph_obj.graph_nx)\n\n stat_str = self.mtx_file_name + 
','\n stat_str += f\"{self.nrows},{self.ncols},{self.nnz},\"\n stat_str += stat_obj_L.get_str()\n stat_str += stat_obj_U.get_str()\n\n logger.info(stat_str)\n if write_files:\n with open(file_path, 'a') as f:\n print(stat_str, file=f)\n \n def unsigned_arithmetic(self, matrix):\n # nonzero() is a slower approach\n # convert to coo matrix for better speed\n rows,cols = matrix.nonzero()\n for row,col in zip(rows,cols):\n if row != col:\n matrix[row, col] = -abs(matrix[row, col])\n else:\n matrix[row, col] = abs(matrix[row, col])\n\n def create_digraph(self, matrix, U_or_L, start_from= None):\n \"\"\"\n if start_from == None, node id will match row id\n \"\"\"\n assert U_or_L in ['U', 'L']\n\n graph_nx= nx.convert_matrix.from_scipy_sparse_matrix(matrix)\n graph_nx.remove_edges_from(nx.selfloop_edges(graph_nx))\n\n if start_from != None:\n graph_nx, _ = useful_methods.relabel_nodes_with_contiguous_numbers(graph_nx, start=start_from)\n # graph_obj= common_classes.GraphClass(id_iter= itertools.count(start_from))\n # else:\n # graph_obj= common_classes.GraphClass(id_iter= itertools.count(0))\n\n G= nx.DiGraph()\n G.add_nodes_from(graph_nx.nodes())\n for e in graph_nx.edges():\n e_new= sorted(list(e))\n G.add_edge(e_new[0], e_new[1])\n \n if U_or_L== 'U':\n G= G.reverse()\n \n topological_list= list(nx.algorithms.dag.topological_sort(G))\n\n graph_obj= common_classes.GraphClass(id_iter= None)\n for n in topological_list:\n graph_obj.create_dummy_node(n)\n for c in G.predecessors(n):\n graph_obj.add_parent_child_edge(n, c)\n \n graph_obj.graph_nx= G\n\n assert nx.algorithms.dag.is_directed_acyclic_graph(graph_obj.graph_nx)\n\n return graph_obj\n\n def read_mm_file(self, name):\n \"\"\"\n NOTE: returns a scipy coo file and not a csr file\n \"\"\"\n path= self.global_var.SPARSE_MATRIX_MARKET_FACTORS_PATH + name\n logger.info(f\"reading {name} matrix from an mtm file at {path}\")\n m= scipy.io.mmread(path)\n m= m.tocsr()\n return m\n \n def write_mm_file(self, matrix, name):\n assert name != self.mtx_file_name\n path= self.global_var.SPARSE_MATRIX_MARKET_FACTORS_PATH + name\n logger.info(f\"writing {name} matrix to a mtm file at {path}\")\n scipy.io.mmwrite(path, matrix)\n\n def lin_solve(self, b, verify= False):\n \"finds x such that Ax=b via arithmetic graphs of L and U factors\"\n\n logger.info(\"Linear solve\")\n\n perm_b= (self.Pr * csr_matrix(b).transpose()).transpose()\n perm_b= perm_b.A[0]\n x= self.tr_solve(perm_b, 'L', verify)\n x= self.tr_solve(x, 'U', verify)\n x= (self.Pc * csr_matrix(x).transpose()).transpose()\n x= x.A[0]\n assert len(x) == len(b)\n\n if verify:\n x_golden= linalg.spsolve(self.A, b)\n x_golden_2= self.lu.solve(b)\n assert(np.allclose(b, self.A * csr_matrix(x).transpose().A))\n assert(np.allclose(x_golden, x_golden_2))\n assert(np.allclose(x_golden, x))\n\n logger.info(f\"First 10 elements of the lin_solve solution: {x[:10]}\")\n return x\n\n def tr_solve(self, b, U_or_L, verify= False):\n logger.info(\"Triangular solve\")\n assert U_or_L in ['U', 'L']\n assert len(b) == self.nrows\n\n if U_or_L == 'U':\n target_M= self.U\n graph_obj= self.U_graph_obj\n map_x_to_node= self.U_map_x_to_node\n map_b_to_node= self.U_map_b_to_node\n elif U_or_L == 'L':\n target_M= self.L\n graph_obj= self.L_graph_obj\n map_x_to_node= self.L_map_x_to_node\n map_b_to_node= self.L_map_b_to_node\n else:\n assert 0\n\n self.instantiate_b(U_or_L, b)\n\n ac_eval.ac_eval_non_recurse(graph_obj.graph, graph_obj.graph_nx)\n\n # get output\n x=[]\n for idx in range(self.ncols):\n node= 
map_x_to_node[idx]\n x.append(graph_obj.graph[node].curr_val)\n x= np.array(x)\n\n # verify\n if verify:\n x_golden= linalg.spsolve_triangular(target_M, b, lower= (U_or_L == 'L'))\n assert(np.allclose(x_golden, x))\n\n return x\n\n def create_imbalanced_tree(self, r, map_v_to_slack, map_v_to_reverse_lvl, pred):\n curr_slack = map_v_to_slack[r]\n ref_lvl= map_v_to_reverse_lvl[r]\n sorted_pred= sorted(pred, key= lambda x: map_v_to_reverse_lvl[x], reverse= True)\n \n map_curr_pred_to_slack= {}\n list_of_tree_leaves_list= []\n\n while sorted_pred:\n critical_pred= sorted_pred[0]\n assert map_v_to_reverse_lvl[critical_pred] < ref_lvl\n\n slack= curr_slack + (ref_lvl - map_v_to_reverse_lvl[critical_pred]) - 1\n\n # slack may become negative, this because of the way map_v_to_slack is computed\n if slack < 0: \n slack = 0\n \n # option_1: all pred with same lvl\n # same_lvl_pred= [p for p in sorted_pred if map_v_to_reverse_lvl[p] == map_v_to_reverse_lvl[critical_pred]]\n same_lvl_pred= []\n for p in sorted_pred:\n if map_v_to_reverse_lvl[p] == map_v_to_reverse_lvl[critical_pred]:\n same_lvl_pred.append(p)\n else:\n break\n \n # power of 2 as that is critical length of binary tree\n if len(same_lvl_pred) >= 2**slack:\n tree_preds= same_lvl_pred\n else:\n # option 2: preds according to slack\n tree_preds= sorted_pred[ : 2**slack]\n assert len(tree_preds) <= 2**slack\n\n # update variables to return\n list_of_tree_leaves_list.append(tree_preds)\n pred_slack= slack - useful_methods.clog2(len(tree_preds))\n for p in tree_preds:\n map_curr_pred_to_slack[p] = pred_slack\n \n # update sorted_pred for next iteration\n next_sorted_pred = sorted_pred[len(tree_preds) :]\n assert len(set(next_sorted_pred) | set(tree_preds)) == len(sorted_pred)\n assert len(set(next_sorted_pred) & set(tree_preds)) == 0\n sorted_pred = next_sorted_pred\n\n # the next tree in the chain will have to face one extra addition\n curr_slack -= 1\n\n return list_of_tree_leaves_list, map_curr_pred_to_slack\n\n\n def conservative_slack_on_every_node(self, map_v_to_reverse_lvl, graph_nx):\n \"\"\"\n Defines slack on every node of coarse graph, \n that can be used to unroll in a tree without affecting the critical length\n \"\"\"\n\n critical_path_len= max(list(map_v_to_reverse_lvl.values()))\n assert critical_path_len == nx.algorithms.dag.dag_longest_path_length(graph_nx)\n \n topological_list= list(nx.algorithms.dag.topological_sort(graph_nx))\n\n map_v_to_slack= {n: critical_path_len - map_v_to_reverse_lvl[n] for n in graph_nx}\n\n # successors before predecessors\n for n in reversed(topological_list):\n _, map_curr_pred_to_slack= self.create_imbalanced_tree(n, map_v_to_slack, map_v_to_reverse_lvl, list(graph_nx.predecessors(n)))\n for p, s in map_curr_pred_to_slack.items():\n if s < map_v_to_slack[p]:\n map_v_to_slack[p] = s\n\n # resulting slack can be negative or positive bacause reverse_lvl does not take into \n # account the numbers of MAC to be performed in each coarse node.\n # it just makes sure the lvl of successor is +1 lvl of all predecessors\n # making it 0 is complicated, because that would need another reassignment of slack with a new critical_path_len\n # Following cannot be asserted\n min_slack= min(list(map_v_to_slack.values()))\n assert min_slack <= 0\n assert max([map_v_to_slack[n] for n in nx.algorithms.dag.dag_longest_path(graph_nx)]) <= 0\n\n return map_v_to_slack\n\n def create_arithmetic_graph_directly(self, mode, reduction_mode):\n logger.info(f\"creating_arithmetic_graph for {mode} triangular matrix with 
reduction_mode: {reduction_mode}\")\n assert mode in ['U', 'L'] # upper triangle or lower triangle\n assert reduction_mode in ['tree', 'chain', 'hybrid']\n\n # dict that maps non-zero matrix variable to input nodes of graph\n # key: (row, col)\n # val: node id in the graph\n map_nz_idx_to_node={}\n \n # map variables of b vector in Ax=b to a node in the graph\n # key: idx in b\n # val: node id in the graph\n map_b_to_node={}\n\n # map variables of x vector in Ax=b to a node in the graph\n # key: idx in x\n # val: node id in the graph\n # Thesre are the outputs of the computation\n map_x_to_node={}\n\n # map row/col to nodes\n # key: row/col idx depending on U or L\n # val: obj of class Coarse_node_mappings\n map_r_to_nodes_info= {}\n\n start= time.time()\n if mode == 'U':\n # take anti-transpose to convert to an equivalent L\n #target_M= np.rot90(self.U.A,2).T\n target_M= self.U\n col_ptrs_ls= target_M.tolil().rows\n # iterate on 'U' in a reverse order\n iterate_list= reversed(list(enumerate(col_ptrs_ls)))\n elif mode == 'L':\n target_M= self.L\n col_ptrs_ls= target_M.tolil().rows\n iterate_list= enumerate(col_ptrs_ls)\n else:\n assert 0\n # logger.info(f\"A: {time.time() - start}\")\n\n start= time.time()\n if reduction_mode == 'hybrid' or reduction_mode == 'chain':\n graph_nx= self.create_digraph(target_M, mode).graph_nx\n map_v_to_reverse_lvl= useful_methods.compute_reverse_lvl(graph_nx)\n else:\n map_v_to_reverse_lvl= None\n # logger.info(f\"B1: {time.time() - start}\")\n\n start= time.time()\n if reduction_mode == 'hybrid':\n map_v_to_slack= self.conservative_slack_on_every_node(map_v_to_reverse_lvl, graph_nx)\n else:\n map_v_to_slack= None\n # logger.info(f\"B2: {time.time() - start}\")\n\n # row-wise list of column indices that have non-zero values\n\n print(f\"{target_M.count_nonzero()} nnz\")\n logger.info(\"build graph\")\n start= time.time()\n graph_obj= common_classes.GraphClass(id_iter= itertools.count())\n time_matrix= [0]\n time_rest= [0]\n for r, col_ptrs in iterate_list:\n assert len(col_ptrs) != 0 # there is atleast one non-zero in this row\n if mode == 'U':\n assert min(col_ptrs) <= r # matrix is infact L\n elif mode == 'L':\n assert max(col_ptrs) <= r # matrix is infact L\n else:\n assert 0\n\n row_prod_nodes, reduction_nodes, x_key, b_key, _, _ = \\\n self.create_arithmetic_graph_single_row(reduction_mode, graph_obj, r, target_M, map_x_to_node, map_b_to_node, map_nz_idx_to_node, time_matrix, time_rest,\n map_v_to_reverse_lvl, map_v_to_slack)\n\n coarse_node_info= Coarse_node_mappings(r, reduction_nodes, row_prod_nodes, x_key, b_key)\n map_r_to_nodes_info[r] = coarse_node_info\n\n # logger.info(f\"C: {time.time() - start}\")\n # logger.info(f\"C1: {time_matrix[0]}\")\n # logger.info(f\"C2: {time_rest[0]}\")\n\n if self.output_mode == 'single_node':\n logger.info(f\"output_mode: {self.output_mode}, adding dummy nodes in case the graph does not have a single output node\")\n\n # nodes with node parents\n output_nodes = [n for n, obj in graph_obj.graph.items() if len(obj.parent_key_list) == 0]\n assert len(output_nodes) != 0\n\n graph_len_before_dummy_nodes= len(graph_obj.graph)\n if len(output_nodes) > 1:\n graph_obj.create_tree_of_nodes(output_nodes, 'sum')\n\n logger.info(f\"{len(graph_obj.graph) - graph_len_before_dummy_nodes} dummy nodes added because there were {len(output_nodes)} output_nodes earlier\")\n logger.info(f\"{len(graph_obj.graph) - graph_len_before_dummy_nodes} dummy nodes out of {len(graph_obj.graph)} total nodes. 
Percentage: {(len(graph_obj.graph) - graph_len_before_dummy_nodes) *100 / len(graph_obj.graph)}\")\n else:\n logger.info(\"Dummy nodes not needed\")\n\n graph_obj.create_graph_nx()\n\n logger.info(f\"nnz of the {mode} triangular matrix: {target_M.count_nonzero()}, Length of the graph: {len(graph_obj.graph)}\")\n\n for r, _ in iterate_list:\n assert map_r_to_nodes_info[r].final_prod_node == map_x_to_node[r]\n \n return graph_obj, map_x_to_node, map_b_to_node, map_nz_idx_to_node, map_r_to_nodes_info\n \n def create_arithmetic_graph_single_row(self, mode, graph_obj, r, matrix, map_x_to_node, map_b_to_node, map_nz_idx_to_node, time_matrix, time_rest, map_v_to_reverse_lvl= None, map_v_to_slack= None):\n assert mode in ['chain', 'tree', 'hybrid']\n \n b_key= graph_obj.create_real_valued_leaf_node()\n map_b_to_node[r]= b_key\n\n start= time.time()\n row_matrix= matrix.getrow(r)\n row_indices= row_matrix.indices\n row_data= row_matrix.data\n time_matrix[0] += time.time() - start\n \n start= time.time()\n # key: column idx c\n row_prods={}\n row_prods_ls= [] # ls is needed because the order is important in create_chain_of_nodes\n\n for i, c in enumerate(row_indices):\n data= row_data[i]\n if r!= c: # except diagonal\n child_0= graph_obj.create_real_valued_leaf_node(val= -data) # negative of the actual val\n map_nz_idx_to_node[(r,c)] = child_0\n\n child_1= map_x_to_node[c]\n prod_node= graph_obj.create_2input_node(child_0, child_1, 'prod')\n row_prods[c]= prod_node\n row_prods_ls.append(prod_node)\n else:\n assert data != 0\n diag_elem= graph_obj.create_real_valued_leaf_node(val= 1.0/data) # inverse of diag \n map_nz_idx_to_node[(r,r)]= diag_elem\n\n reduction_nodes= set()\n reduction_graph_nx= nx.DiGraph()\n if mode== 'tree':\n reduction_head= graph_obj.create_tree_of_nodes([b_key] + row_prods_ls, 'sum', reduction_nodes, reduction_graph_nx)\n elif mode == 'chain':\n assert map_v_to_reverse_lvl != None\n pred= [c for c in row_indices if r != c]\n sorted_pred= sorted(pred, key= lambda x: map_v_to_reverse_lvl[x], reverse= True)\n chain_leaves = [b_key] + [row_prods[p] for p in reversed(sorted_pred)]\n # if row_indices[0] == r: # U matrix\n # # order is reversed so that the resulting chain \n # # will minimize the critical path of the triangular solve\n # chain_leaves= [b_key] + list(reversed(row_prods_ls))\n # elif row_indices[-1] == r: # L matrix\n # chain_leaves= [b_key] + row_prods_ls\n # else:\n # assert 0\n reduction_head= graph_obj.create_chain_of_nodes(chain_leaves, 'sum', reduction_nodes, reduction_graph_nx)\n if len(pred) == 0:\n assert reduction_head == b_key\n\n elif mode == 'hybrid':\n assert map_v_to_reverse_lvl != None\n assert map_v_to_slack != None\n\n curr_slack = map_v_to_slack[r]\n ref_lvl= map_v_to_reverse_lvl[r]\n pred= [c for c in row_indices if r != c]\n\n list_of_tree_leaves_list, _ = self.create_imbalanced_tree(r, map_v_to_slack, map_v_to_reverse_lvl, pred)\n time_rest[0] += time.time() - start\n tree_heads= []\n for tree_preds in list_of_tree_leaves_list:\n tree_leaves= [row_prods[p] for p in tree_preds]\n tree_head= graph_obj.create_tree_of_nodes(tree_leaves, 'sum', reduction_nodes, reduction_graph_nx)\n tree_heads.append(tree_head)\n\n chain_leaves = [b_key] + list(reversed(tree_heads)) # reverse because of the way chain is created\n reduction_head= graph_obj.create_chain_of_nodes(chain_leaves, 'sum', reduction_nodes, reduction_graph_nx)\n\n if len(pred) == 0:\n assert reduction_head == b_key\n else:\n assert 0\n\n # prod with inverse of the diagonal element\n x_key= 
graph_obj.create_2input_node(reduction_head, diag_elem,'prod')\n map_x_to_node[r] = x_key\n \n row_prod_nodes= set(row_prods_ls)\n all_nodes= reduction_nodes | set([x_key]) | row_prod_nodes\n\n return row_prod_nodes, reduction_nodes, x_key, b_key, all_nodes, reduction_graph_nx\n\n def from_coarse_graph_partitions_to_arithmetic_graph_partitions(self, coarse_list_of_partitions, matrix, coarse_graph):\n N_PE = len(coarse_list_of_partitions)\n assert N_PE != 0\n\n # DISTRIBUTE_THRESHOLD = max(1, N_PE/4)\n # NOTE: the way 'tree' mode is handeled, only DISTRIBUTE_THRESHOLD=1 is allowed\n DISTRIBUTE_THRESHOLD = 1\n \n start_from= 1\n graph_obj= common_classes.GraphClass(id_iter= itertools.count(start_from))\n # dict that maps non-zero matrix variable to input nodes of graph\n # key: (row, col)\n # val: node id in the graph\n map_nz_idx_to_node={}\n \n # map variables of b vector in Ax=b to a node in the graph\n # key: idx in b\n # val: node id in the graph\n map_b_to_node={}\n\n # map variables of x vector in Ax=b to a node in the graph\n # key: idx in x\n # val: node id in the graph\n # Thesre are the outputs of the computation\n map_x_to_node={}\n\n coarse_list_of_partitions_transpose= [[coarse_list_of_partitions[pe][layer] for pe in range(N_PE)] for layer in range(len(coarse_list_of_partitions[0]))]\n list_of_partitions= []\n for a_layer_of_parts in coarse_list_of_partitions_transpose:\n n_coarse_nodes= sum([len(part) for part in a_layer_of_parts])\n if n_coarse_nodes > DISTRIBUTE_THRESHOLD: # one coarse node to one PE\n mode= 'chain'\n curr_partitions= [set() for _ in range(N_PE)]\n for pe, part in enumerate(a_layer_of_parts):\n for coarse_n in part:\n _, _, _,_, all_nodes, _= \\\n self.create_arithmetic_graph_single_row(mode, graph_obj, coarse_n, matrix, map_x_to_node, map_b_to_node, map_nz_idx_to_node)\n curr_partitions[pe] |= all_nodes\n list_of_partitions.append(curr_partitions)\n\n else: # distribute a coarse nodes across PEs\n mode= 'tree'\n \n # this curr_partitions can have multiple layers\n # hence, it is a list of lists\n list_of_curr_partitions= [[set() for _ in range(N_PE)]]\n config_obj= CompileConfig(N_PE= N_PE)\n for pe, part in enumerate(a_layer_of_parts):\n for coarse_n in part:\n nnz_prods, reduction_nodes, x_key,_, _, reduction_graph_nx = \\\n self.create_arithmetic_graph_single_row(mode, graph_obj, coarse_n, matrix, map_x_to_node, map_b_to_node, map_nz_idx_to_node)\n\n # nnz_prods can go to any PE in parallel\n chunk_size= (len(nnz_prods) + N_PE -1 ) // N_PE\n chunked_parts= [set(list(nnz_prods)[i:i + chunk_size]) for i in range(0, len(nnz_prods), chunk_size)]\n\n # nnz prods always go to 0th list of the list_of_curr_partitions\n list_of_curr_partitions[0]= [list_of_curr_partitions[0][pe] | chunked_parts[pe] for pe in range(N_PE)]\n \n # reduction_nodes \n status_dict= {n: status_node(n) for n in reduction_graph_nx}\n reduction_list_of_partitions= layer_wise_partition_ASAP(reduction_graph_nx, status_dict, config_obj)\n \n # add extra layers to list_of_curr_partitions,\n # if there are more layers in list_of_partitions.\n # This allows adaptive increase in size of list_of_curr_partitions as needed.\n # +1 is to account for layers for nnz_prods \n for l in range(len(reduction_list_of_partitions[0]) - len(list_of_curr_partitions) + 2):\n list_of_curr_partitions.append([set() for _ in range(N_PE)])\n \n for pe, parts in enumerate(reduction_list_of_partitions):\n for l, part in enumerate(parts):\n list_of_curr_partitions[l][pe] |= part\n\n # final x_key to the 0th PE. 
Could have randomized PE selection.\n list_of_curr_partitions[1 + len(reduction_list_of_partitions)][0].add(x_key)\n\n list_of_partitions += list_of_curr_partitions\n\n graph_obj.create_graph_nx()\n\n # sanity checks\n nnz= len(matrix.indices)\n assert len(graph_obj.graph) == 3*nnz\n assert len([n for l in list_of_partitions for part in l for n in part]) == 2*nnz\n\n # transpose to make it same as list_of_partitions from other functions\n list_of_partitions_transpose= [[list_of_partitions[pe][layer] for pe in range(N_PE)] for layer in range(len(list_of_partitions[0]))]\n list_of_partitions= list_of_partitions_transpose\n\n return graph_obj, map_x_to_node, map_b_to_node, map_nz_idx_to_node, list_of_partitions\n\n def instantiate_b(self, U_or_L, b= None):\n assert U_or_L in ['U', 'L']\n\n if U_or_L == 'U':\n graph_obj= self.U_graph_obj\n map_b_to_node= self.U_map_b_to_node\n elif U_or_L == 'L':\n graph_obj= self.L_graph_obj\n map_b_to_node= self.L_map_b_to_node\n else:\n assert 0\n\n if b is None:\n b= [1.0 for r in range(self.nrows)]\n\n # instead of directly iterating over dict, \n # generate fixed sequence of indices based on nrows\n for r in range(self.nrows):\n node= map_b_to_node[r]\n graph_obj.graph[node].curr_val= b[r]\n\n \n def coarsen_partitions(self, graph, graph_nx, list_of_partitions, map_r_to_nodes_info):\n logger.info(\"Coarsening partitions\")\n N_PE= len(list_of_partitions)\n assert N_PE != 0\n \n n_layers= len(list_of_partitions[0])\n assert n_layers != 0\n\n total_nodes= set([n for pe in range(N_PE) for l in range(len(list_of_partitions[0])) for n in list_of_partitions[pe][l]])\n n_total_nodes= len(total_nodes) \n\n print(len(total_nodes))\n print(len(useful_methods.get_non_leaves(graph_nx)))\n assert total_nodes == set(useful_methods.get_non_leaves(graph_nx))\n\n logger.info(f\"lenght of partitions: {[len(list_of_partitions[pe][l]) for pe in range(N_PE) for l in range(n_layers)]}\")\n\n # reverse map\n map_n_to_r= {}\n for r, coarse_node_info in map_r_to_nodes_info.items():\n for n in coarse_node_info.all_nodes:\n assert n not in map_n_to_r\n map_n_to_r[n] = r\n assert len(map_n_to_r) == n_total_nodes\n\n tot_coarse_nodes= 0\n id_iter= itertools.count(0)\n map_semi_coarse_to_tup= {}\n map_n_to_semi_coarse= {}\n total_fine_n_mapped= 0\n list_of_partitions_coarse= [[] for _ in range(N_PE)]\n done_nodes= set()\n for pe in range(N_PE):\n for l in range(n_layers):\n curr_partition= set(list_of_partitions[pe][l])\n semi_coarse_n_set= set()\n while len(curr_partition) != 0:\n n= curr_partition.pop() # remember this element is removed!\n r= map_n_to_r[n]\n n_coarse_nodes, nodes_mapped, operation_nodes_tup_ls= map_r_to_nodes_info[r].create_coarse_nodes(graph, graph_nx, curr_partition | set([n]))\n tot_coarse_nodes += n_coarse_nodes\n curr_partition -= nodes_mapped\n done_nodes |= nodes_mapped\n assert n_coarse_nodes == len(operation_nodes_tup_ls)\n\n nodes_mapped_assert= set()\n for tup in operation_nodes_tup_ls:\n semi_coarse_n= next(id_iter)\n semi_coarse_n_set.add(semi_coarse_n)\n\n map_semi_coarse_to_tup[semi_coarse_n] = tup\n map_r_to_nodes_info[r].map_semi_coarse_to_tup[semi_coarse_n] = tup\n for fine_n in tup[1]:\n nodes_mapped_assert.add(fine_n)\n assert fine_n not in map_n_to_semi_coarse\n map_n_to_semi_coarse[fine_n] = semi_coarse_n\n map_r_to_nodes_info[r].map_n_to_semi_coarse[fine_n] = semi_coarse_n\n\n assert nodes_mapped_assert == nodes_mapped\n \n total_fine_n_mapped += len(nodes_mapped)\n\n list_of_partitions_coarse[pe].append(semi_coarse_n_set)\n\n assert 
len(done_nodes) == n_total_nodes\n\n logger.info(f\"total_fine_n_mapped: {total_fine_n_mapped} out of n_total_nodes : {n_total_nodes}\")\n assert total_fine_n_mapped == n_total_nodes\n logger.info(f\"Totol coarse nodes: {tot_coarse_nodes}, tot rows: {len(map_r_to_nodes_info)}\")\n\n logger.info(f\"lenght of partitions coarse: {[len(list_of_partitions_coarse[pe][l]) for pe in range(N_PE) for l in range(n_layers) ]}\")\n\n # create semi coarse graph\n semi_coarse_g= nx.DiGraph()\n semi_coarse_g.add_nodes_from(list(map_semi_coarse_to_tup.keys()))\n done_edges= set()\n done_coarse_nodes= set()\n for u,v in graph_nx.edges():\n if not graph[u].is_leaf():\n coarse_u= map_n_to_semi_coarse[u]\n coarse_v= map_n_to_semi_coarse[v]\n if coarse_u != coarse_v:\n # print(coarse_u, u, v, map_semi_coarse_to_tup[coarse_u])\n ALLOWED= False\n if coarse_u in done_coarse_nodes:\n # print(\"repeat\",coarse_u, u, v, map_semi_coarse_to_tup[coarse_u])\n if map_semi_coarse_to_tup[coarse_u][0] == 'final':\n assert (coarse_u, coarse_v) not in done_edges, f\"{coarse_u, coarse_v}, len(done_edges)\"\n ALLOWED = True\n else:\n assert map_semi_coarse_to_tup[coarse_v][0] != 'mac'\n else:\n ALLOWED= True\n\n if ALLOWED:\n semi_coarse_g.add_edge(coarse_u, coarse_v)\n done_edges.add((coarse_u, coarse_v))\n done_coarse_nodes.add(coarse_u)\n else:\n assert u not in map_n_to_semi_coarse\n\n # remove 0/1 input adder nodes\n removed_nodes= set()\n candidate_nodes= set([n for n in semi_coarse_g.nodes() if map_semi_coarse_to_tup[n][0] == 'add'])\n while candidate_nodes:\n n= candidate_nodes.pop()\n n_type= map_semi_coarse_to_tup[n][0]\n pred= list(semi_coarse_g.predecessors(n))\n if n==1482:\n print(n_type, pred)\n if n_type == 'add' and len(pred) <= 1:\n succ= list(semi_coarse_g.successors(n))\n assert len(succ) == 1\n succ= succ[0]\n\n if len(pred) == 1:\n semi_coarse_g.add_edge(pred[0], succ)\n else: # one of the predecessor of succ will be deleted, hence it now becomes a candidate node\n if map_semi_coarse_to_tup[succ][0] == 'add':\n candidate_nodes.add(succ)\n \n semi_coarse_g.remove_node(n)\n removed_nodes.add(n)\n\n\n # also remove the nodes from list_of_partitions_coarse\n for pe in range(N_PE):\n for l in range(n_layers):\n curr_partition= list_of_partitions_coarse[pe][l] - removed_nodes\n list_of_partitions_coarse[pe][l]= curr_partition\n\n logger.info(f\"size of semi_coarse_g after removing useless add nodes: {len(semi_coarse_g)}\")\n\n # assertions\n assert (len(semi_coarse_g)) != 0\n for coarse_u, coarse_v in semi_coarse_g.edges():\n u_type= map_semi_coarse_to_tup[coarse_u][0]\n v_type= map_semi_coarse_to_tup[coarse_v][0]\n\n assert not(u_type == 'mac' and v_type == 'mac')\n assert not(u_type == 'add' and v_type == 'mac')\n assert not(u_type == 'final' and v_type == 'add')\n\n if v_type == 'mac':\n assert u_type == 'final'\n\n for coarse_n in semi_coarse_g:\n n_type= map_semi_coarse_to_tup[coarse_n][0]\n if n_type == 'add' or n_type == 'mac':\n assert len(list(semi_coarse_g.successors(coarse_n))) != 0\n if n_type == 'add':\n pred= list(semi_coarse_g.predecessors(coarse_n))\n assert len(pred) > 1\n\n assert nx.algorithms.dag.is_directed_acyclic_graph(semi_coarse_g)\n\n logger.info(f\"Non leaf coarse edges: {semi_coarse_g.number_of_edges()}\")\n\n return list_of_partitions_coarse, semi_coarse_g, map_n_to_semi_coarse, map_r_to_nodes_info, map_n_to_r, map_semi_coarse_to_tup\n"
] |
[
[
"numpy.allclose",
"scipy.sparse.linalg.splu",
"scipy.sparse.linalg.spsolve",
"numpy.arange",
"scipy.sparse.csr_matrix",
"numpy.ones",
"scipy.sparse.linalg.spsolve_triangular",
"numpy.array"
]
] |
harshgeek4coder/Smart-Rural-Ecosystem
|
[
"a7d5036d8ef7dde91cd3d04492c98d0b29fc28b0"
] |
[
"app/routes.py"
] |
[
"from flask import render_template , flash , redirect , url_for , request\nimport csv\nimport pandas as pd\nfrom app import app\nfrom app.forms import RegForm , LoginForm\nfrom app.models import User\nfrom app import app, db, pwd\nfrom flask_login import login_user , current_user , logout_user , login_required\n\nfrom app.tweet_Scraper import Import_tweet_sentiment\nfrom app.jobs_scraper import find_jobs_from,clean_job_titles\nfrom app.crop_forecast_profile import crop_profile,TopFiveLosers,TopFiveWinners\n\nfrom app.crop_prod import get_estimate_yield\nfrom app import disease_prediction\nfrom app.disease_prediction import predict_disease\nfrom app.symptoms import get_precautions,get_description\n\ntw_obj=Import_tweet_sentiment()\ndesired_chars=['titles','companies','links','date_listed']\n\ndata_sever='app/static/Medical/Symptom_severity.csv'\ndata_precaution='app/static/Medical/symptom_precaution.csv'\ndata_desc='app/static/Medical/symptom_Description.csv'\n\n\ndf_prec=pd.read_csv(data_precaution)\ndf_prec.columns=['Disease','Precaution1','Precaution2','Precaution3','Precaution4']\n\ndescr=pd.read_csv(data_desc)\ndescr.columns=['Disease','Description']\n\n\nwith open('app/static/medical_Testing.csv', newline='') as f:\n reader = csv.reader(f)\n symptoms = next(reader)\n symptoms = symptoms[:len(symptoms)-1]\n\n\n\[email protected]('/')\ndef homepage():\n return render_template(\"information.html\" , )\n\[email protected](\"/signup\" , methods = ['GET' , 'POST'])\ndef signuppage() :\n if current_user.is_authenticated :\n flash(\"You are already logged in.\" , \"warning\")\n return redirect(url_for(\"homepage\"))\n form = RegForm(request.form)\n if request.method == \"POST\" and form.validate():\n hashed = pwd.generate_password_hash(form.password.data).decode('utf-8')\n element = User(uname = form.uname.data , email = form.email.data , password = hashed)\n db.session.add(element)\n db.session.commit()\n flash(\"Account created for %s!\" % (form.uname.data) , \"success\")\n return redirect(url_for(\"loginpage\"))\n return render_template(\"signup.html\" , form = form)\n\[email protected](\"/login\" , methods = ['GET' , 'POST'])\ndef loginpage():\n if current_user.is_authenticated :\n flash(\"You are already logged in.\" , \"warning\")\n return redirect(url_for(\"homepage\"))\n form = LoginForm(request.form)\n if request.method == \"POST\" and form.validate():\n member = User.query.filter_by(uname = form.uname.data).first()\n if member and pwd.check_password_hash(member.password , form.password.data) :\n login_user(member)\n flash(\"Welcome, %s!\" % (form.uname.data) , \"success\")\n return redirect(url_for(\"homepage\"))\n else :\n flash(\"Username or Password doesn't match, please try again.\" , \"danger\")\n return redirect(url_for(\"loginpage\"))\n return render_template(\"login.html\" , form = form)\n\[email protected](\"/tweet_updates\" , methods = ['GET' , 'POST'])\ndef tweet_updates():\n if current_user.is_authenticated : \n\n text = request.form.get('text','MoRD_GOI')\n all_tweets=tw_obj.get_hashtag(text)\n\n return render_template(\"tweet.html\" ,all_tweets=all_tweets,query=text)\n\n else :\n flash(\"Username or Password doesn't match, please try again.\" , \"danger\")\n return redirect(url_for(\"loginpage\"))\n \n return render_template(\"tweet.html\" ,all_tweets=all_tweets,query=text)\n\n \n\[email protected](\"/jobs_updates\" , methods = ['GET' , 'POST'])\ndef jobs_updates():\n\n if current_user.is_authenticated : \n\n job_title = request.form.get('job_title','intern')\n 
job_location = request.form.get('job_location','india')\n\n jobs_list, num_listings=find_jobs_from(\"Indeed\",job_title,job_location,desired_chars)\n\n words=clean_job_titles(jobs_list['titles'])\n\n res=zip(words,jobs_list['companies'],jobs_list['links'],jobs_list['date_listed'])\n res=list(res) \n\n return render_template(\"jobs.html\" ,res=res,num_listings=num_listings) \n\n else :\n flash(\"Username or Password doesn't match, please try again.\" , \"danger\")\n return redirect(url_for(\"loginpage\")) \n\n return render_template(\"jobs.html\" ,res=res,num_listings=num_listings)\n\n\n\[email protected](\"/crop_analysis\" , methods = ['GET' , 'POST'])\ndef crop_analysis():\n if current_user.is_authenticated :\n\n crop_name = request.form.get('crop_name','arhar')\n\n crop_context=crop_profile(crop_name)\n\n topfive=TopFiveWinners()\n bottomfive=TopFiveLosers()\n\n return render_template(\"crop_analysis.html\", context=crop_context,topfive=topfive,bottomfive=bottomfive)\n\n else :\n flash(\"Username or Password doesn't match, please try again.\" , \"danger\")\n return redirect(url_for(\"loginpage\")) \n \n\n return render_template(\"crop_analysis.html\", context=crop_context,topfive=topfive,bottomfive=bottomfive)\n\n\n\n\n\[email protected](\"/crop_production_estimate\" , methods = ['GET' , 'POST'])\ndef crop_production_estimate():\n if current_user.is_authenticated :\n\n temp = request.form.get('temp',0)\n area= request.form.get('area',0)\n topfive=TopFiveWinners()\n bottomfive=TopFiveLosers()\n\n possible_prod,st=get_estimate_yield(temp,area)\n\n\n\n return render_template(\"crop_prod_estimate.html\", possible_prod=possible_prod,topfive=topfive,bottomfive=bottomfive,st=st)\n\n else :\n flash(\"Username or Password doesn't match, please try again.\" , \"danger\")\n return redirect(url_for(\"loginpage\")) \n \n\n return render_template(\"crop_prod_estimate.html\", possible_prod=possible_prod)\n\n\n\n\n\[email protected](\"/medical_consult\" , methods = ['GET' , 'POST'])\ndef medical_consult():\n if current_user.is_authenticated :\n if request.method == 'POST':\n selected_symptoms = [] \n if(request.form['Symptom1']!=\"\") and (request.form['Symptom1'] not in selected_symptoms):\n selected_symptoms.append(request.form['Symptom1'])\n if(request.form['Symptom2']!=\"\") and (request.form['Symptom2'] not in selected_symptoms):\n selected_symptoms.append(request.form['Symptom2'])\n if(request.form['Symptom3']!=\"\") and (request.form['Symptom3'] not in selected_symptoms):\n selected_symptoms.append(request.form['Symptom3'])\n if(request.form['Symptom4']!=\"\") and (request.form['Symptom4'] not in selected_symptoms):\n selected_symptoms.append(request.form['Symptom4'])\n if(request.form['Symptom5']!=\"\") and (request.form['Symptom5'] not in selected_symptoms):\n selected_symptoms.append(request.form['Symptom5'])\n\n disease = predict_disease(selected_symptoms)\n prec1,prec2,prec3,prec4=get_precautions(disease[0])\n descript=get_description(disease[0])\n\n return render_template(\"medical_consult.html\",disease=disease[0],symptoms=symptoms , prec1=prec1 , prec2=prec2 , prec3=prec3 ,prec4=prec4 ,descript=descript)\n else:\n\n return render_template(\"medical_consult.html\",symptoms=symptoms)\n \n else :\n flash(\"Username or Password doesn't match, please try again.\" , \"danger\")\n return redirect(url_for(\"loginpage\")) \n \n\n return render_template(\"medical_consult.html\" ,symptoms=symptoms)\n\n\n\n\n\n\n\n\n\[email protected](\"/logout\")\ndef logoutpage():\n logout_user()\n 
flash(\"Successfuly logged out.\" , \"success\")\n return redirect(url_for(\"homepage\"))\n\[email protected](\"/member-page\")\n@login_required\ndef member():\n return render_template(\"members.html\")"
] |
[
[
"pandas.read_csv"
]
] |
sparkzsolutions/The-Dark-Onion-Crawler
|
[
"56f52127acc7ff4151d455dd1f007638ad0e795d"
] |
[
"NudeNet/nudenet/detector.py"
] |
[
"import os\r\nimport cv2\r\nimport pydload\r\nimport logging\r\nimport numpy as np\r\nimport onnxruntime\r\nfrom progressbar import progressbar\r\n\r\nfrom .detector_utils import preprocess_image\r\nfrom .video_utils import get_interest_frames_from_video\r\n\r\n\r\ndef dummy(x):\r\n return x\r\n\r\n\r\nFILE_URLS = {\r\n \"default\": {\r\n \"checkpoint\": \"https://github.com/notAI-tech/NudeNet/releases/download/v0/detector_v2_default_checkpoint.onnx\",\r\n \"classes\": \"https://github.com/notAI-tech/NudeNet/releases/download/v0/detector_v2_default_classes\",\r\n },\r\n \"base\": {\r\n \"checkpoint\": \"https://github.com/notAI-tech/NudeNet/releases/download/v0/detector_v2_base_checkpoint.onnx\",\r\n \"classes\": \"https://github.com/notAI-tech/NudeNet/releases/download/v0/detector_v2_base_classes\",\r\n },\r\n}\r\n\r\n\r\nclass Detector:\r\n detection_model = None\r\n classes = None\r\n\r\n def __init__(self, model_name=\"default\"):\r\n \"\"\"\r\n model = Detector()\r\n \"\"\"\r\n checkpoint_url = FILE_URLS[model_name][\"checkpoint\"]\r\n classes_url = FILE_URLS[model_name][\"classes\"]\r\n\r\n home = os.path.expanduser(\"~\")\r\n model_folder = os.path.join(home, f\".NudeNet/\")\r\n if not os.path.exists(model_folder):\r\n os.makedirs(model_folder)\r\n\r\n checkpoint_name = os.path.basename(checkpoint_url)\r\n checkpoint_path = os.path.join(model_folder, checkpoint_name)\r\n classes_path = os.path.join(model_folder, \"classes\")\r\n\r\n if not os.path.exists(checkpoint_path):\r\n print(\"Downloading the checkpoint to\", checkpoint_path)\r\n pydload.dload(checkpoint_url, save_to_path=checkpoint_path, max_time=None)\r\n\r\n if not os.path.exists(classes_path):\r\n print(\"Downloading the classes list to\", classes_path)\r\n pydload.dload(classes_url, save_to_path=classes_path, max_time=None)\r\n\r\n self.detection_model = onnxruntime.InferenceSession(checkpoint_path)\r\n\r\n self.classes = [c.strip() for c in open(classes_path).readlines() if c.strip()]\r\n\r\n def detect_video(\r\n self, video_path, mode=\"default\", min_prob=0.6, batch_size=2, show_progress=True\r\n ):\r\n frame_indices, frames, fps, video_length = get_interest_frames_from_video(\r\n video_path\r\n )\r\n logging.debug(\r\n f\"VIDEO_PATH: {video_path}, FPS: {fps}, Important frame indices: {frame_indices}, Video length: {video_length}\"\r\n )\r\n if mode == \"fast\":\r\n frames = [\r\n preprocess_image(frame, min_side=480, max_side=800) for frame in frames\r\n ]\r\n else:\r\n frames = [preprocess_image(frame) for frame in frames]\r\n\r\n scale = frames[0][1]\r\n frames = [frame[0] for frame in frames]\r\n all_results = {\r\n \"metadata\": {\r\n \"fps\": fps,\r\n \"video_length\": video_length,\r\n \"video_path\": video_path,\r\n },\r\n \"preds\": {},\r\n }\r\n\r\n progress_func = progressbar\r\n\r\n if not show_progress:\r\n progress_func = dummy\r\n\r\n for _ in progress_func(range(int(len(frames) / batch_size) + 1)):\r\n batch = frames[:batch_size]\r\n batch_indices = frame_indices[:batch_size]\r\n frames = frames[batch_size:]\r\n frame_indices = frame_indices[batch_size:]\r\n if batch_indices:\r\n outputs = self.detection_model.run(\r\n [s_i.name for s_i in self.detection_model.get_outputs()],\r\n {self.detection_model.get_inputs()[0].name: np.asarray(batch)},\r\n )\r\n\r\n labels = [op for op in outputs if op.dtype == \"int32\"][0]\r\n scores = [op for op in outputs if isinstance(op[0][0], np.float32)][0]\r\n boxes = [op for op in outputs if isinstance(op[0][0], np.ndarray)][0]\r\n\r\n boxes /= scale\r\n for 
frame_index, frame_boxes, frame_scores, frame_labels in zip(\r\n frame_indices, boxes, scores, labels\r\n ):\r\n if frame_index not in all_results[\"preds\"]:\r\n all_results[\"preds\"][frame_index] = []\r\n\r\n for box, score, label in zip(\r\n frame_boxes, frame_scores, frame_labels\r\n ):\r\n if score < min_prob:\r\n continue\r\n box = box.astype(int).tolist()\r\n label = self.classes[label]\r\n\r\n all_results[\"preds\"][frame_index].append(\r\n {\r\n \"box\": [int(c) for c in box],\r\n \"score\": float(score),\r\n \"label\": label,\r\n }\r\n )\r\n\r\n return all_results\r\n\r\n def detect(self, img_path, mode=\"default\", min_prob=None):\r\n if mode == \"fast\":\r\n image, scale = preprocess_image(img_path, min_side=480, max_side=800)\r\n if not min_prob:\r\n min_prob = 0.5\r\n else:\r\n image, scale = preprocess_image(img_path)\r\n if not min_prob:\r\n min_prob = 0.6\r\n\r\n outputs = self.detection_model.run(\r\n [s_i.name for s_i in self.detection_model.get_outputs()],\r\n {self.detection_model.get_inputs()[0].name: np.expand_dims(image, axis=0)},\r\n )\r\n\r\n labels = [op for op in outputs if op.dtype == \"int32\"][0]\r\n scores = [op for op in outputs if isinstance(op[0][0], np.float32)][0]\r\n boxes = [op for op in outputs if isinstance(op[0][0], np.ndarray)][0]\r\n\r\n boxes /= scale\r\n processed_boxes = []\r\n for box, score, label in zip(boxes[0], scores[0], labels[0]):\r\n if score < min_prob:\r\n continue\r\n box = box.astype(int).tolist()\r\n label = self.classes[label]\r\n processed_boxes.append(\r\n {\"box\": [int(c) for c in box], \"score\": float(score), \"label\": label}\r\n )\r\n\r\n return processed_boxes\r\n\r\n def censor(self, img_path, out_path=None, visualize=False, parts_to_blur=[]):\r\n if not out_path and not visualize:\r\n print(\r\n \"No out_path passed and visualize is set to false. There is no point in running this function then.\"\r\n )\r\n return\r\n\r\n image = cv2.imread(img_path)\r\n boxes = self.detect(img_path)\r\n\r\n if parts_to_blur:\r\n boxes = [i[\"box\"] for i in boxes if i[\"label\"] in parts_to_blur]\r\n else:\r\n boxes = [i[\"box\"] for i in boxes]\r\n\r\n for box in boxes:\r\n part = image[box[1] : box[3], box[0] : box[2]]\r\n image = cv2.rectangle(\r\n image, (box[0], box[1]), (box[2], box[3]), (0, 0, 0), cv2.FILLED\r\n )\r\n\r\n if visualize:\r\n cv2.imshow(\"Blurred image\", image)\r\n cv2.waitKey(0)\r\n\r\n if out_path:\r\n cv2.imwrite(out_path, image)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n m = Detector()\r\n print(m.detect(\"/Users/bedapudi/Desktop/n2.jpg\"))\r\n"
] |
[
[
"numpy.asarray",
"numpy.expand_dims"
]
] |
akanametov/dcgan-pytorch
|
[
"20f85607db04f62f0b18beb8ea42fc757069350c"
] |
[
"utils.py"
] |
[
"import os\nimport urllib\nimport zipfile\nfrom tqdm import tqdm\n\nimport torch\nfrom torch import nn\n\n\ndef download(url: str, filename: str, chunk_size: int = 4096) -> None:\n print(f'Downloading {url} ...')\n with open(filename, \"wb\") as fh:\n with urllib.request.urlopen(urllib.request.Request(url)) as response:\n with tqdm(total=response.length) as pbar:\n for chunk in iter(lambda: response.read(chunk_size), \"\"):\n if not chunk:\n break\n pbar.update(chunk_size)\n fh.write(chunk)\n return None\n\n\ndef extract(from_path: str, to_path: str) -> None:\n print(f'Extracting {from_path} ...')\n with zipfile.ZipFile(from_path, \"r\", compression=zipfile.ZIP_STORED) as zf:\n zf.extractall(to_path)\n return None\n\n\ndef download_and_extract(root: str, url: str, filename: str=None):\n root = os.path.expanduser(root)\n if not filename:\n filename = os.path.basename(url)\n fpath = os.path.join(root, filename)\n if os.path.exists(fpath):\n print('Dataset is already downloaded.')\n else:\n os.makedirs(root, exist_ok=True)\n _ = download(url, fpath)\n _ = extract(fpath, root)\n return None\n\n\ndef initialize_weights(m):\n if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):\n torch.nn.init.normal_(m.weight.data, 0.0, 0.02) \n if isinstance(m, nn.BatchNorm2d):\n torch.nn.init.normal_(m.weight.data, 1.0, 0.02)\n torch.nn.init.constant_(m.bias.data, 0)\n"
] |
[
[
"torch.nn.init.constant_",
"torch.nn.init.normal_"
]
] |
TomDLT/himalaya-1
|
[
"58eab7fc17e7471462a9ba7917e7034a5f6bfacc"
] |
[
"himalaya/backend/torch.py"
] |
[
"from functools import partial\n\ntry:\n import torch\nexcept ImportError as error:\n import sys\n if \"pytest\" in sys.modules: # if run through pytest\n import pytest\n pytest.skip(\"PyTorch not installed.\")\n raise ImportError(\"PyTorch not installed.\") from error\n\nfrom ._utils import _dtype_to_str\n\n###############################################################################\n\n\ndef apply_argmax(array, argmax, axis):\n \"\"\"Apply precomputed argmax indices in multi dimension arrays\n\n array[np.argmax(array)] works fine in dimension 1, but not in higher ones.\n This function tackles this issue.\n\n Examples\n --------\n >>> import torch\n >>> array = torch.randn(10, 4, 8)\n >>> argmax = torch.argmax(array, axis=1)\n >>> max_ = apply_argmax(array, argmax, axis=1)\n >>> assert torch.all(max_ == torch.max(array, axis=1).values)\n \"\"\"\n argmax = argmax.unsqueeze(dim=axis)\n max_ = torch.gather(array, dim=axis, index=argmax)\n return torch.squeeze(max_, dim=axis)\n\n\ndef std_float64(X, axis=None, demean=True, keepdims=False):\n \"\"\"Compute the standard deviation of X with double precision,\n and cast back the result to original dtype.\n \"\"\"\n X_64 = torch.as_tensor(X, dtype=torch.float64)\n X_std = (X_64 ** 2).sum(dim=axis, dtype=torch.float64)\n if demean:\n X_std -= X_64.sum(axis, dtype=torch.float64) ** 2 / X.shape[axis]\n X_std = X_std ** .5\n X_std /= (X.shape[axis] ** .5)\n\n X_std = torch.as_tensor(X_std, dtype=X.dtype, device=X.device)\n if keepdims:\n X_std = X_std.unsqueeze(dim=axis)\n\n return X_std\n\n\ndef mean_float64(X, axis=None, keepdims=False):\n \"\"\"Compute the mean of X with double precision,\n and cast back the result to original dtype.\n \"\"\"\n X_mean = X.sum(axis, dtype=torch.float64) / X.shape[axis]\n\n X_mean = torch.as_tensor(X_mean, dtype=X.dtype, device=X.device)\n if keepdims:\n X_mean = X_mean.unsqueeze(dim=axis)\n return X_mean\n\n\n###############################################################################\n\nname = \"torch\"\nargmax = torch.argmax\nrandn = torch.randn\nrand = torch.rand\nmatmul = torch.matmul\nstack = torch.stack\nabs = torch.abs\nsum = torch.sum\nsqrt = torch.sqrt\nany = torch.any\nall = torch.all\nnan = torch.tensor(float('nan'))\ninf = torch.tensor(float('inf'))\nisnan = torch.isnan\nisinf = torch.isinf\nlogspace = torch.logspace\nconcatenate = torch.cat\nbool = torch.bool\nint32 = torch.int32\nfloat32 = torch.float32\nfloat64 = torch.float64\nlog = torch.log\nexp = torch.exp\narange = torch.arange\nunique = torch.unique\neinsum = torch.einsum\ntanh = torch.tanh\npower = torch.pow\nprod = torch.prod\nsign = torch.sign\nclip = torch.clamp\nfinfo = torch.finfo\n\n\ndef atleast_1d(array):\n array = asarray(array)\n if array.ndim == 0:\n array = array[None]\n return array\n\n\ndef flip(array, axis=0):\n return torch.flip(array, dims=[axis])\n\n\ndef sort(array):\n return torch.sort(array).values\n\n\ndef diagonal_view(array, axis1=0, axis2=1):\n \"\"\"Return a view of the array diagonal.\"\"\"\n return torch.diagonal(array, 0, dim1=axis1, dim2=axis2)\n\n\ndef to_numpy(array):\n try:\n return array.cpu().numpy()\n except AttributeError:\n return array\n\n\ndef to_cpu(array):\n return array.cpu()\n\n\ndef to_gpu(array, device=None):\n return array\n\n\ndef isin(x, y):\n import numpy as np # XXX\n np_result = np.isin(x.cpu().numpy(), y.cpu().numpy())\n return asarray(np_result, dtype=torch.bool, device=x.device)\n\n\ndef searchsorted(x, y):\n import numpy as np # XXX\n np_result = np.searchsorted(x.cpu().numpy(), 
y.cpu().numpy())\n return asarray(np_result, dtype=torch.int64, device=x.device)\n\n\ndef flatnonzero(x):\n return torch.nonzero(torch.flatten(x), as_tuple=True)[0]\n\n\ndef asarray(x, dtype=None, device=None):\n if dtype is None:\n if isinstance(x, torch.Tensor):\n dtype = x.dtype\n if hasattr(x, \"dtype\") and hasattr(x.dtype, \"name\"):\n dtype = x.dtype.name\n if dtype is not None:\n dtype = _dtype_to_str(dtype)\n dtype = getattr(torch, dtype)\n if device is None and isinstance(x, torch.Tensor):\n device = x.device\n\n try:\n tensor = torch.as_tensor(x, dtype=dtype, device=device)\n except Exception:\n import numpy as np\n array = np.asarray(x, dtype=_dtype_to_str(dtype))\n tensor = torch.as_tensor(array, dtype=dtype, device=device)\n return tensor\n\n\ndef asarray_like(x, ref):\n return torch.as_tensor(x, dtype=ref.dtype, device=ref.device)\n\n\ndef norm(x, ord=\"fro\", axis=None, keepdims=False):\n return torch.norm(x, p=ord, dim=axis, keepdim=keepdims)\n\n\ndef copy(x):\n return x.clone()\n\n\ndef transpose(a, axes=None):\n if axes is None:\n return a.t()\n else:\n return a.permute(*axes)\n\n\ndef max(*args, **kwargs):\n res = torch.max(*args, **kwargs)\n if isinstance(res, torch.Tensor):\n return res\n else:\n return res.values\n\n\ndef min(*args, **kwargs):\n res = torch.min(*args, **kwargs)\n if isinstance(res, torch.Tensor):\n return res\n else:\n return res.values\n\n\ndef zeros(shape, dtype=\"float32\", device=\"cpu\"):\n if isinstance(shape, int):\n shape = (shape, )\n if isinstance(dtype, str):\n dtype = getattr(torch, dtype)\n return torch.zeros(shape, dtype=dtype, device=device)\n\n\ndef zeros_like(array, shape=None, dtype=None, device=None):\n \"\"\"Add a shape parameter in zeros_like.\"\"\"\n if shape is None:\n shape = array.shape\n if isinstance(shape, int):\n shape = (shape, )\n if isinstance(dtype, str):\n dtype = getattr(torch, dtype)\n if dtype is None:\n dtype = array.dtype\n if device is None:\n device = array.device\n return torch.zeros(shape, dtype=dtype, device=device, layout=array.layout)\n\n\ndef ones_like(array, shape=None, dtype=None, device=None):\n \"\"\"Add a shape parameter in ones_like.\"\"\"\n if shape is None:\n shape = array.shape\n if isinstance(shape, int):\n shape = (shape, )\n if isinstance(dtype, str):\n dtype = getattr(torch, dtype)\n if dtype is None:\n dtype = array.dtype\n if device is None:\n device = array.device\n return torch.ones(shape, dtype=dtype, device=device, layout=array.layout)\n\n\ndef full_like(array, fill_value, shape=None, dtype=None, device=None):\n \"\"\"Add a shape parameter in full_like.\"\"\"\n if shape is None:\n shape = array.shape\n if isinstance(shape, int):\n shape = (shape, )\n if isinstance(dtype, str):\n dtype = getattr(torch, dtype)\n if dtype is None:\n dtype = array.dtype\n if device is None:\n device = array.device\n return torch.full(shape, fill_value, dtype=dtype, device=device,\n layout=array.layout)\n\n\ndef check_arrays(*all_inputs):\n \"\"\"Change all inputs into Tensors (or list of Tensors) using the same\n precision and device as the first one. 
Some tensors can be None.\n \"\"\"\n all_tensors = []\n all_tensors.append(asarray(all_inputs[0]))\n dtype = all_tensors[0].dtype\n device = all_tensors[0].device\n for tensor in all_inputs[1:]:\n if tensor is None:\n pass\n elif isinstance(tensor, list):\n tensor = [asarray(tt, dtype=dtype, device=device) for tt in tensor]\n else:\n tensor = asarray(tensor, dtype=dtype, device=device)\n all_tensors.append(tensor)\n return all_tensors\n\n\ndef svd(X, full_matrices=True):\n U, s, V = torch.svd(X, some=not full_matrices)\n return U, s, V.transpose(-2, -1)\n\n\ntry:\n eigh = torch.linalg.eigh\nexcept AttributeError:\n # torch.__version__ < 1.8\n eigh = partial(torch.symeig, eigenvectors=True)\n"
] |
[
[
"torch.diagonal",
"torch.norm",
"torch.ones",
"torch.max",
"torch.full",
"torch.zeros",
"torch.svd",
"torch.min",
"torch.sort",
"torch.flatten",
"torch.gather",
"torch.flip",
"torch.squeeze",
"torch.as_tensor"
]
] |
ZoeYou/hypernym-path-generation
|
[
"da6b717e06ed9cd90625bbf352fa372958f808cc"
] |
[
"seq2seq/print_output_beam.py"
] |
[
"from nltk.corpus import wordnet as wn\nimport re\nimport sys\nimport pandas as pd\nfrom tqdm import tqdm\n\nin_fname = sys.argv[1] #$RESULTS.txt\nout_fname = sys.argv[2] #$RESULTS.out.txt\nis_WN = False\n\n# ------------------------------- #\n# NB. Output column order:\n# ------------------------------- #\n# relation\n#\n# node (hyponym)\n# node_lexname\n#\n# gold (hypernym)\n# gold_lexname\n#\n# pred (hypernym)\n# pred_lexname\n#\n# is_gold_wn18rr_dev\n# is_gold_wordnet\n#\n# lexname identity (wn18rr_dev)\n# lexname identity (wordnet)\n#\n# Wu & Palmer similarity (wn18rr_dev)\n# Wu & Palmer similarity (wordnet)\n# ------------------------------- #\n\ndef pred_gold_ident(pred_syn, gold_syn):\n \"\"\"\n 1. check if the predicted synset equals to the gold synsets in validation set\n 2. check if the predicted synset is in the hypernyms set of wordNet\n \"\"\"\n is_gold_dev = pred_syn == gold_syn # needs revision (cases where multiple golds exist in WN18RR's train & dev set)\n is_gold_wn = pred_syn in node_syn.hypernyms() or pred_syn in node_syn.instance_hypernyms()\n return is_gold_dev, is_gold_wn\n\n\ndef lex_ident(node_syn, pred_syn, gold_syn):\n \"\"\"\n 1. check if the predicted synset's lexname equals to the gold synset's name\n 2. check if the predicted synset's lexname is in the set of its wordnet hypernyms' lexnames (including hypernyms/instance_hypernyms)\n \"\"\"\n pred_lex = pred_syn.lexname()\n gold_lex = gold_syn.lexname()\n\n lex_ident_dev = pred_lex == gold_lex\n lex_ident_wn = pred_lex in [x.lexname() for x in node_syn.instance_hypernyms()] \\\n or pred_lex in [x.lexname() for x in node_syn.hypernyms()]\n return lex_ident_dev, lex_ident_wn, pred_lex, gold_lex\n\n\ndef wup_score(pred_syn, gold_syn):\n \"\"\" Calculate the wup score for predicted synset and gold synset\"\"\"\n if pred_syn == gold_syn:\n wup_dev = 1.00\n else:\n wup_dev = pred_syn.wup_similarity(gold_syn)\n if wup_dev is None:\n wup_dev = 0.00\n return wup_dev\n\n\ndef nb_pred_in_gold(pred_syns, gold_syns):\n cnt = 0\n for pred_syn in pred_syns:\n if pred_syn in gold_syns:\n cnt += 1\n return cnt \n\n\ndef mean_max_wup_score(pred_syns, gold_syns, is_wn = True):\n if is_wn:\n res = []\n for pred in pred_syns:\n wup_wn = [wup_score(pred, hyper) for hyper in gold_syns]\n if len(wup_wn) == 0 or all(score is None for score in wup_wn):\n wup_wn_max = 0.00\n else:\n wup_wn_max = max(wup_wn)\n res.append(wup_wn_max)\n return sum(res) / len(res)\n else:\n return 0.00\n\n\ndef hits_at_k(pred_labels, gold_labels, k):\n top_k = pred_labels[:k]\n res = sum([gold in top_k for gold in gold_labels]) / len(gold_labels)\n if len(gold_labels) <= k:\n return res\n else:\n return res * (len(gold_labels)/k)\n\n\n\n \n\n \n\nif is_WN:\n with open(in_fname, 'r') as f:\n corpus = []\n for line in f:\n line = re.sub('\\n', '', line)\n if line != u'':\n corpus.append(line)\n\n with open(out_fname, 'w') as rerank_file:\n rerank_file.write('{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n' \\\n .format('relation', 'node', 'gold_syns', 'pred_k',\n 'hits@1','hits@3','hits@10', 'mean_max_wup'))\n\n for line in tqdm(corpus): # each predicted pairs \n node, rel_raw, gold, pred_k = line.split('\\t')\n pred_k = pred_k.split('|')\n \n rel_raw = rel_raw[1:]\n \n # Load node and gold synsets from WordNet in NLTK\n node_syn = wn.synset(node)\n gold_syns = node_syn.hypernyms() + node_syn.instance_hypernyms()\n \n # If the line has no prediction at all: pred = '<unk>'\n #for i in range(len(pred)):\n for i in range(len(pred_k)):\n if pred_k[i] == '':\n pred_k[i] 
= '<unk>'\n\n # Relation type definition\n if rel_raw == 'hypernym':\n if '.v.' in node:\n rel = rel_raw + '_v'\n else: # '.n.' in node:\n rel = rel_raw + '_n'\n else: # 'instance_hypernym'\n rel = rel_raw\n\n pred_syns = [wn.synset(pred) for pred in pred_k if pred != '<unk>'] \n mean_max_wup = mean_max_wup_score(pred_syns, gold_syns, is_wn = True)\n hits_at_1 = hits_at_k(pred_syns, gold_syns, 1)\n hits_at_3 = hits_at_k(pred_syns, gold_syns, 3)\n hits_at_10 = hits_at_k(pred_syns, gold_syns, 10)\n\n gold_syns = [syn.name() for syn in gold_syns]\n\n rerank_file.write('{}\\t{}\\t{}\\t{}\\t{:.4f}\\t{:.4f}\\t{:.4f}\\t{:.5f}\\n' \\\n .format(rel, node, '|'.join(gold_syns), '|'.join(pred_k), hits_at_1, hits_at_3, hits_at_10, mean_max_wup))\n \nelse:\n corpus = pd.read_csv(in_fname, sep='\\t', names=[\"node\", \"relation\", \"gold_syns\", \"pred_k\"])\n\n with open(out_fname, 'w') as rerank_file:\n rerank_file.write('{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n' \\\n .format('relation', 'node', 'gold_syns', 'pred_k',\n 'hits@1','hits@3','hits@10', 'mean_max_wup'))\n\n for index, line in tqdm(corpus.iterrows()): \n node = line.node\n rel = line.relation[1:]\n gold_syns = list(set(corpus[corpus['node'] == node]['gold_syns'].tolist()))\n pred_syns = line.pred_k.split('|')\n\n mean_max_wup = mean_max_wup_score(pred_syns, gold_syns, is_wn = False)\n hits_at_1 = hits_at_k(pred_syns, gold_syns, 1)\n hits_at_3 = hits_at_k(pred_syns, gold_syns, 3)\n hits_at_10 = hits_at_k(pred_syns, gold_syns, 10)\n\n rerank_file.write('{}\\t{}\\t{}\\t{}\\t{:.4f}\\t{:.4f}\\t{:.4f}\\t{:.5f}\\n' \\\n .format(rel, node, '|'.join(gold_syns), '|'.join(pred_syns), hits_at_1, hits_at_3, hits_at_10, mean_max_wup))\n\n"
] |
[
[
"pandas.read_csv"
]
] |
klasocki/flair
|
[
"8d0ec268cf0d7f2b08c617f60296d9c7853b633f"
] |
[
"flair/models/sequence_tagger_model.py"
] |
[
"import logging\nfrom pathlib import Path\n\nimport logging\nfrom pathlib import Path\nfrom typing import List, Union, Optional, Callable, Dict\n\nimport numpy as np\nimport torch\nimport torch.nn\nimport torch.nn.functional as F\nfrom tabulate import tabulate\nfrom torch.nn.parameter import Parameter\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\n\nimport flair.nn\nfrom flair.data import Dictionary, Sentence, Token, Label, space_tokenizer\nfrom flair.datasets import SentenceDataset, StringDataset\nfrom flair.embeddings import TokenEmbeddings\nfrom flair.file_utils import cached_path\nfrom flair.training_utils import Metric, Result, store_embeddings\n\nlog = logging.getLogger(\"flair\")\n\nSTART_TAG: str = \"<START>\"\nSTOP_TAG: str = \"<STOP>\"\n\n\ndef to_scalar(var):\n return var.view(-1).detach().tolist()[0]\n\n\ndef argmax(vec):\n _, idx = torch.max(vec, 1)\n return to_scalar(idx)\n\n\ndef log_sum_exp(vec):\n max_score = vec[0, argmax(vec)]\n max_score_broadcast = max_score.view(1, -1).expand(1, vec.size()[1])\n return max_score + torch.log(torch.sum(torch.exp(vec - max_score_broadcast)))\n\n\ndef argmax_batch(vecs):\n _, idx = torch.max(vecs, 1)\n return idx\n\n\ndef log_sum_exp_batch(vecs):\n maxi = torch.max(vecs, 1)[0]\n maxi_bc = maxi[:, None].repeat(1, vecs.shape[1])\n recti_ = torch.log(torch.sum(torch.exp(vecs - maxi_bc), 1))\n return maxi + recti_\n\n\ndef pad_tensors(tensor_list):\n ml = max([x.shape[0] for x in tensor_list])\n shape = [len(tensor_list), ml] + list(tensor_list[0].shape[1:])\n template = torch.zeros(*shape, dtype=torch.long, device=flair.device)\n lens_ = [x.shape[0] for x in tensor_list]\n for i, tensor in enumerate(tensor_list):\n template[i, : lens_[i]] = tensor\n\n return template, lens_\n\n\nclass SequenceTagger(flair.nn.Model):\n def __init__(\n self,\n hidden_size: int,\n embeddings: TokenEmbeddings,\n tag_dictionary: Dictionary,\n tag_type: str,\n use_crf: bool = True,\n use_rnn: bool = True,\n rnn_layers: int = 1,\n dropout: float = 0.0,\n word_dropout: float = 0.05,\n locked_dropout: float = 0.5,\n train_initial_hidden_state: bool = False,\n rnn_type: str = \"LSTM\",\n pickle_module: str = \"pickle\",\n beta: float = 1.0,\n weights: Dict = None,\n ):\n \"\"\"\n Initializes a SequenceTagger\n :param hidden_size: number of hidden states in RNN\n :param embeddings: word embeddings used in tagger\n :param tag_dictionary: dictionary of tags you want to predict\n :param tag_type: string identifier for tag type\n :param use_crf: if True use CRF decoder, else project directly to tag space\n :param use_rnn: if True use RNN layer, otherwise use word embeddings directly\n :param rnn_layers: number of RNN layers\n :param dropout: dropout probability\n :param word_dropout: word dropout probability\n :param locked_dropout: locked dropout probability\n :param train_initial_hidden_state: if True, trains initial hidden state of RNN\n :param beta: Parameter for F-beta score for evaluation and training annealing\n :param weights: Weights for classes for the loss function\n\n \"\"\"\n\n super(SequenceTagger, self).__init__()\n self.beta = beta\n self.use_rnn = use_rnn\n self.hidden_size = hidden_size\n self.use_crf: bool = use_crf\n self.rnn_layers: int = rnn_layers\n\n self.trained_epochs: int = 0\n\n self.embeddings = embeddings\n\n # set the dictionaries\n self.tag_dictionary: Dictionary = tag_dictionary\n self.tag_type: str = tag_type\n self.tagset_size: int = len(tag_dictionary)\n\n # Initialize the weight tensor\n if weights is not None:\n 
n_classes = len(self.tag_dictionary)\n weight_list = [1. for i in range(n_classes)]\n for i, tag in enumerate(self.tag_dictionary.get_items()):\n if tag in weights.keys():\n weight_list[i] = weights[tag]\n self.weights = torch.FloatTensor(weight_list).to(flair.device)\n else:\n self.weights = None\n\n\n\n # initialize the network architecture\n self.nlayers: int = rnn_layers\n self.hidden_word = None\n\n # dropouts\n self.use_dropout: float = dropout\n self.use_word_dropout: float = word_dropout\n self.use_locked_dropout: float = locked_dropout\n\n self.pickle_module = pickle_module\n\n if dropout > 0.0:\n self.dropout = torch.nn.Dropout(dropout)\n\n if word_dropout > 0.0:\n self.word_dropout = flair.nn.WordDropout(word_dropout)\n\n if locked_dropout > 0.0:\n self.locked_dropout = flair.nn.LockedDropout(locked_dropout)\n\n rnn_input_dim: int = self.embeddings.embedding_length\n\n self.relearn_embeddings: bool = True\n\n if self.relearn_embeddings:\n self.embedding2nn = torch.nn.Linear(rnn_input_dim, rnn_input_dim)\n\n self.train_initial_hidden_state = train_initial_hidden_state\n self.bidirectional = True\n self.rnn_type = rnn_type\n\n # bidirectional LSTM on top of embedding layer\n if self.use_rnn:\n num_directions = 2 if self.bidirectional else 1\n\n if self.rnn_type in [\"LSTM\", \"GRU\"]:\n\n self.rnn = getattr(torch.nn, self.rnn_type)(\n rnn_input_dim,\n hidden_size,\n num_layers=self.nlayers,\n dropout=0.0 if self.nlayers == 1 else 0.5,\n bidirectional=True,\n batch_first=True,\n )\n # Create initial hidden state and initialize it\n if self.train_initial_hidden_state:\n self.hs_initializer = torch.nn.init.xavier_normal_\n\n self.lstm_init_h = Parameter(\n torch.randn(self.nlayers * num_directions, self.hidden_size),\n requires_grad=True,\n )\n\n self.lstm_init_c = Parameter(\n torch.randn(self.nlayers * num_directions, self.hidden_size),\n requires_grad=True,\n )\n\n # TODO: Decide how to initialize the hidden state variables\n # self.hs_initializer(self.lstm_init_h)\n # self.hs_initializer(self.lstm_init_c)\n\n # final linear map to tag space\n self.linear = torch.nn.Linear(\n hidden_size * num_directions, len(tag_dictionary)\n )\n else:\n self.linear = torch.nn.Linear(\n self.embeddings.embedding_length, len(tag_dictionary)\n )\n\n if self.use_crf:\n self.transitions = torch.nn.Parameter(\n torch.randn(self.tagset_size, self.tagset_size)\n )\n\n self.transitions.detach()[\n self.tag_dictionary.get_idx_for_item(START_TAG), :\n ] = -10000\n\n self.transitions.detach()[\n :, self.tag_dictionary.get_idx_for_item(STOP_TAG)\n ] = -10000\n\n self.to(flair.device)\n\n def _get_state_dict(self):\n model_state = {\n \"state_dict\": self.state_dict(),\n \"embeddings\": self.embeddings,\n \"hidden_size\": self.hidden_size,\n \"train_initial_hidden_state\": self.train_initial_hidden_state,\n \"tag_dictionary\": self.tag_dictionary,\n \"tag_type\": self.tag_type,\n \"use_crf\": self.use_crf,\n \"use_rnn\": self.use_rnn,\n \"rnn_layers\": self.rnn_layers,\n \"use_word_dropout\": self.use_word_dropout,\n \"use_locked_dropout\": self.use_locked_dropout,\n \"rnn_type\": self.rnn_type,\n \"beta\": self.beta\n }\n return model_state\n\n @staticmethod\n def _init_model_with_state_dict(state):\n\n rnn_type = \"LSTM\" if not \"rnn_type\" in state.keys() else state[\"rnn_type\"]\n use_dropout = 0.0 if not \"use_dropout\" in state.keys() else state[\"use_dropout\"]\n use_word_dropout = (\n 0.0 if not \"use_word_dropout\" in state.keys() else state[\"use_word_dropout\"]\n )\n use_locked_dropout = (\n 0.0\n 
if not \"use_locked_dropout\" in state.keys()\n else state[\"use_locked_dropout\"]\n )\n train_initial_hidden_state = (\n False\n if not \"train_initial_hidden_state\" in state.keys()\n else state[\"train_initial_hidden_state\"]\n )\n beta = 1.0 if \"beta\" not in state.keys() else state[\"beta\"]\n\n model = SequenceTagger(\n hidden_size=state[\"hidden_size\"],\n embeddings=state[\"embeddings\"],\n tag_dictionary=state[\"tag_dictionary\"],\n tag_type=state[\"tag_type\"],\n use_crf=state[\"use_crf\"],\n use_rnn=state[\"use_rnn\"],\n rnn_layers=state[\"rnn_layers\"],\n dropout=use_dropout,\n word_dropout=use_word_dropout,\n locked_dropout=use_locked_dropout,\n train_initial_hidden_state=train_initial_hidden_state,\n rnn_type=rnn_type,\n beta=beta,\n )\n model.load_state_dict(state[\"state_dict\"])\n return model\n\n def predict(\n self,\n sentences: Union[List[Sentence], Sentence, List[str], str],\n mini_batch_size=32,\n embedding_storage_mode=\"none\",\n all_tag_prob: bool = False,\n verbose: bool = False,\n use_tokenizer: Union[bool, Callable[[str], List[Token]]] = space_tokenizer,\n ) -> List[Sentence]:\n \"\"\"\n Predict sequence tags for Named Entity Recognition task\n :param sentences: a Sentence or a string or a List of Sentence or a List of string.\n :param mini_batch_size: size of the minibatch, usually bigger is more rapid but consume more memory,\n up to a point when it has no more effect.\n :param embedding_storage_mode: 'none' for the minimum memory footprint, 'cpu' to store embeddings in Ram,\n 'gpu' to store embeddings in GPU memory.\n :param all_tag_prob: True to compute the score for each tag on each token,\n otherwise only the score of the best tag is returned\n :param verbose: set to True to display a progress bar\n :param use_tokenizer: a custom tokenizer when string are provided (default is space based tokenizer).\n :return: List of Sentence enriched by the predicted tags\n \"\"\"\n with torch.no_grad():\n if not sentences:\n return sentences\n\n if isinstance(sentences, Sentence) or isinstance(sentences, str):\n sentences = [sentences]\n\n if (flair.device.type == \"cuda\") and embedding_storage_mode == \"cpu\":\n log.warning(\n \"You are inferring on GPU with parameter 'embedding_storage_mode' set to 'cpu'.\"\n \"This option will slow down your inference, usually 'none' (default value) \"\n \"is a better choice.\"\n )\n\n # reverse sort all sequences by their length\n rev_order_len_index = sorted(\n range(len(sentences)), key=lambda k: len(sentences[k]), reverse=True\n )\n original_order_index = sorted(\n range(len(rev_order_len_index)), key=lambda k: rev_order_len_index[k]\n )\n\n reordered_sentences: List[Union[Sentence, str]] = [\n sentences[index] for index in rev_order_len_index\n ]\n\n if isinstance(sentences[0], Sentence):\n # remove previous embeddings\n store_embeddings(reordered_sentences, \"none\")\n dataset = SentenceDataset(reordered_sentences)\n else:\n dataset = StringDataset(\n reordered_sentences, use_tokenizer=use_tokenizer\n )\n dataloader = DataLoader(\n dataset=dataset, batch_size=mini_batch_size, collate_fn=lambda x: x\n )\n\n if self.use_crf:\n transitions = self.transitions.detach().cpu().numpy()\n else:\n transitions = None\n\n # progress bar for verbosity\n if verbose:\n dataloader = tqdm(dataloader)\n\n results: List[Sentence] = []\n for i, batch in enumerate(dataloader):\n\n if verbose:\n dataloader.set_description(f\"Inferencing on batch {i}\")\n results += batch\n batch = self._filter_empty_sentences(batch)\n # stop if all sentences are 
empty\n if not batch:\n continue\n\n feature: torch.Tensor = self.forward(batch)\n tags, all_tags = self._obtain_labels(\n feature=feature,\n batch_sentences=batch,\n transitions=transitions,\n get_all_tags=all_tag_prob,\n )\n\n for (sentence, sent_tags) in zip(batch, tags):\n for (token, tag) in zip(sentence.tokens, sent_tags):\n token.add_tag_label(self.tag_type, tag)\n\n # all_tags will be empty if all_tag_prob is set to False, so the for loop will be avoided\n for (sentence, sent_all_tags) in zip(batch, all_tags):\n for (token, token_all_tags) in zip(sentence.tokens, sent_all_tags):\n token.add_tags_proba_dist(self.tag_type, token_all_tags)\n\n # clearing token embeddings to save memory\n store_embeddings(batch, storage_mode=embedding_storage_mode)\n\n results: List[Union[Sentence, str]] = [\n results[index] for index in original_order_index\n ]\n assert len(sentences) == len(results)\n return results\n\n def evaluate(\n self,\n data_loader: DataLoader,\n out_path: Path = None,\n embedding_storage_mode: str = \"none\",\n ) -> (Result, float):\n\n if type(out_path) == str:\n out_path = Path(out_path)\n\n with torch.no_grad():\n eval_loss = 0\n\n batch_no: int = 0\n\n metric = Metric(\"Evaluation\", beta=self.beta)\n\n lines: List[str] = []\n\n if self.use_crf:\n transitions = self.transitions.detach().cpu().numpy()\n else:\n transitions = None\n\n for batch in data_loader:\n batch_no += 1\n\n with torch.no_grad():\n features = self.forward(batch)\n loss = self._calculate_loss(features, batch)\n tags, _ = self._obtain_labels(\n feature=features,\n batch_sentences=batch,\n transitions=transitions,\n get_all_tags=False,\n )\n\n eval_loss += loss\n\n for (sentence, sent_tags) in zip(batch, tags):\n for (token, tag) in zip(sentence.tokens, sent_tags):\n token: Token = token\n token.add_tag_label(\"predicted\", tag)\n\n # append both to file for evaluation\n eval_line = \"{} {} {} {}\\n\".format(\n token.text,\n token.get_tag(self.tag_type).value,\n tag.value,\n tag.score,\n )\n lines.append(eval_line)\n lines.append(\"\\n\")\n for sentence in batch:\n # make list of gold tags\n gold_tags = [\n (tag.tag, str(tag)) for tag in sentence.get_spans(self.tag_type)\n ]\n # make list of predicted tags\n predicted_tags = [\n (tag.tag, str(tag)) for tag in sentence.get_spans(\"predicted\")\n ]\n\n # check for true positives, false positives and false negatives\n for tag, prediction in predicted_tags:\n if (tag, prediction) in gold_tags:\n metric.add_tp(tag)\n else:\n metric.add_fp(tag)\n\n for tag, gold in gold_tags:\n if (tag, gold) not in predicted_tags:\n metric.add_fn(tag)\n else:\n metric.add_tn(tag)\n\n store_embeddings(batch, embedding_storage_mode)\n\n eval_loss /= batch_no\n\n if out_path is not None:\n with open(out_path, \"w\", encoding=\"utf-8\") as outfile:\n outfile.write(\"\".join(lines))\n\n detailed_result = (\n f\"\\nMICRO_AVG: acc {metric.micro_avg_accuracy()} - f1-score {metric.micro_avg_f_score()}\"\n f\"\\nMACRO_AVG: acc {metric.macro_avg_accuracy()} - f1-score {metric.macro_avg_f_score()}\"\n )\n for class_name in metric.get_classes():\n detailed_result += (\n f\"\\n{class_name:<10} tp: {metric.get_tp(class_name)} - fp: {metric.get_fp(class_name)} - \"\n f\"fn: {metric.get_fn(class_name)} - tn: {metric.get_tn(class_name)} - precision: \"\n f\"{metric.precision(class_name):.4f} - recall: {metric.recall(class_name):.4f} - \"\n f\"accuracy: {metric.accuracy(class_name):.4f} - f1-score: \"\n f\"{metric.f_score(class_name):.4f}\"\n )\n\n result = Result(\n 
main_score=metric.micro_avg_f_score(),\n log_line=f\"{metric.precision()}\\t{metric.recall()}\\t{metric.micro_avg_f_score()}\",\n log_header=\"PRECISION\\tRECALL\\tF1\",\n detailed_results=detailed_result,\n )\n\n return result, eval_loss\n\n def forward_loss(\n self, data_points: Union[List[Sentence], Sentence], sort=True\n ) -> torch.tensor:\n features = self.forward(data_points)\n return self._calculate_loss(features, data_points)\n\n def forward(self, sentences: List[Sentence]):\n\n self.embeddings.embed(sentences)\n\n lengths: List[int] = [len(sentence.tokens) for sentence in sentences]\n longest_token_sequence_in_batch: int = max(lengths)\n\n pre_allocated_zero_tensor = torch.zeros(\n self.embeddings.embedding_length * longest_token_sequence_in_batch,\n dtype=torch.float,\n device=flair.device,\n )\n\n all_embs = list()\n for sentence in sentences:\n all_embs += [\n emb for token in sentence for emb in token.get_each_embedding()\n ]\n nb_padding_tokens = longest_token_sequence_in_batch - len(sentence)\n\n if nb_padding_tokens > 0:\n t = pre_allocated_zero_tensor[\n : self.embeddings.embedding_length * nb_padding_tokens\n ]\n all_embs.append(t)\n\n sentence_tensor = torch.cat(all_embs).view(\n [\n len(sentences),\n longest_token_sequence_in_batch,\n self.embeddings.embedding_length,\n ]\n )\n\n # --------------------------------------------------------------------\n # FF PART\n # --------------------------------------------------------------------\n if self.use_dropout > 0.0:\n sentence_tensor = self.dropout(sentence_tensor)\n if self.use_word_dropout > 0.0:\n sentence_tensor = self.word_dropout(sentence_tensor)\n if self.use_locked_dropout > 0.0:\n sentence_tensor = self.locked_dropout(sentence_tensor)\n\n if self.relearn_embeddings:\n sentence_tensor = self.embedding2nn(sentence_tensor)\n\n if self.use_rnn:\n packed = torch.nn.utils.rnn.pack_padded_sequence(\n sentence_tensor, lengths, enforce_sorted=False, batch_first=True\n )\n\n # if initial hidden state is trainable, use this state\n if self.train_initial_hidden_state:\n initial_hidden_state = [\n self.lstm_init_h.unsqueeze(1).repeat(1, len(sentences), 1),\n self.lstm_init_c.unsqueeze(1).repeat(1, len(sentences), 1),\n ]\n rnn_output, hidden = self.rnn(packed, initial_hidden_state)\n else:\n rnn_output, hidden = self.rnn(packed)\n\n sentence_tensor, output_lengths = torch.nn.utils.rnn.pad_packed_sequence(\n rnn_output, batch_first=True\n )\n\n if self.use_dropout > 0.0:\n sentence_tensor = self.dropout(sentence_tensor)\n # word dropout only before LSTM - TODO: more experimentation needed\n # if self.use_word_dropout > 0.0:\n # sentence_tensor = self.word_dropout(sentence_tensor)\n if self.use_locked_dropout > 0.0:\n sentence_tensor = self.locked_dropout(sentence_tensor)\n\n features = self.linear(sentence_tensor)\n\n return features\n\n def _score_sentence(self, feats, tags, lens_):\n\n start = torch.tensor(\n [self.tag_dictionary.get_idx_for_item(START_TAG)], device=flair.device\n )\n start = start[None, :].repeat(tags.shape[0], 1)\n\n stop = torch.tensor(\n [self.tag_dictionary.get_idx_for_item(STOP_TAG)], device=flair.device\n )\n stop = stop[None, :].repeat(tags.shape[0], 1)\n\n pad_start_tags = torch.cat([start, tags], 1)\n pad_stop_tags = torch.cat([tags, stop], 1)\n\n for i in range(len(lens_)):\n pad_stop_tags[i, lens_[i] :] = self.tag_dictionary.get_idx_for_item(\n STOP_TAG\n )\n\n score = torch.FloatTensor(feats.shape[0]).to(flair.device)\n\n for i in range(feats.shape[0]):\n r = 
torch.LongTensor(range(lens_[i])).to(flair.device)\n\n score[i] = torch.sum(\n self.transitions[\n pad_stop_tags[i, : lens_[i] + 1], pad_start_tags[i, : lens_[i] + 1]\n ]\n ) + torch.sum(feats[i, r, tags[i, : lens_[i]]])\n\n return score\n\n def _calculate_loss(\n self, features: torch.tensor, sentences: List[Sentence]\n ) -> float:\n\n lengths: List[int] = [len(sentence.tokens) for sentence in sentences]\n\n tag_list: List = []\n for s_id, sentence in enumerate(sentences):\n # get the tags in this sentence\n tag_idx: List[int] = [\n self.tag_dictionary.get_idx_for_item(token.get_tag(self.tag_type).value)\n for token in sentence\n ]\n # add tags as tensor\n tag = torch.tensor(tag_idx, device=flair.device)\n tag_list.append(tag)\n\n if self.use_crf:\n # pad tags if using batch-CRF decoder\n tags, _ = pad_tensors(tag_list)\n\n forward_score = self._forward_alg(features, lengths)\n gold_score = self._score_sentence(features, tags, lengths)\n\n score = forward_score - gold_score\n\n return score.mean()\n\n else:\n score = 0\n for sentence_feats, sentence_tags, sentence_length in zip(\n features, tag_list, lengths\n ):\n sentence_feats = sentence_feats[:sentence_length]\n score += torch.nn.functional.cross_entropy(\n sentence_feats, sentence_tags, weight=self.weights\n )\n score /= len(features)\n return score\n\n def _obtain_labels(\n self,\n feature: torch.Tensor,\n batch_sentences: List[Sentence],\n transitions: Optional[np.ndarray],\n get_all_tags: bool,\n ) -> (List[List[Label]], List[List[List[Label]]]):\n \"\"\"\n Returns a tuple of two lists:\n - The first list corresponds to the most likely `Label` per token in each sentence.\n - The second list contains a probability distribution over all `Labels` for each token\n in a sentence for all sentences.\n \"\"\"\n\n lengths: List[int] = [len(sentence.tokens) for sentence in batch_sentences]\n\n tags = []\n all_tags = []\n feature = feature.cpu()\n if self.use_crf:\n feature = feature.numpy()\n else:\n for index, length in enumerate(lengths):\n feature[index, length:] = 0\n softmax_batch = F.softmax(feature, dim=2).cpu()\n scores_batch, prediction_batch = torch.max(softmax_batch, dim=2)\n feature = zip(softmax_batch, scores_batch, prediction_batch)\n\n for feats, length in zip(feature, lengths):\n if self.use_crf:\n confidences, tag_seq, scores = self._viterbi_decode(\n feats=feats[:length],\n transitions=transitions,\n all_scores=get_all_tags,\n )\n else:\n softmax, score, prediction = feats\n confidences = score[:length].tolist()\n tag_seq = prediction[:length].tolist()\n scores = softmax[:length].tolist()\n\n tags.append(\n [\n Label(self.tag_dictionary.get_item_for_index(tag), conf)\n for conf, tag in zip(confidences, tag_seq)\n ]\n )\n\n if get_all_tags:\n all_tags.append(\n [\n [\n Label(\n self.tag_dictionary.get_item_for_index(score_id), score\n )\n for score_id, score in enumerate(score_dist)\n ]\n for score_dist in scores\n ]\n )\n\n return tags, all_tags\n\n @staticmethod\n def _softmax(x, axis):\n # reduce raw values to avoid NaN during exp\n x_norm = x - x.max(axis=axis, keepdims=True)\n y = np.exp(x_norm)\n return y / y.sum(axis=axis, keepdims=True)\n\n def _viterbi_decode(\n self, feats: np.ndarray, transitions: np.ndarray, all_scores: bool\n ):\n id_start = self.tag_dictionary.get_idx_for_item(START_TAG)\n id_stop = self.tag_dictionary.get_idx_for_item(STOP_TAG)\n\n backpointers = np.empty(shape=(feats.shape[0], self.tagset_size), dtype=np.int_)\n backscores = np.empty(\n shape=(feats.shape[0], self.tagset_size), 
dtype=np.float32\n )\n\n init_vvars = np.expand_dims(\n np.repeat(-10000.0, self.tagset_size), axis=0\n ).astype(np.float32)\n init_vvars[0][id_start] = 0\n\n forward_var = init_vvars\n for index, feat in enumerate(feats):\n # broadcasting will do the job of reshaping and is more efficient than calling repeat\n next_tag_var = forward_var + transitions\n bptrs_t = next_tag_var.argmax(axis=1)\n viterbivars_t = next_tag_var[np.arange(bptrs_t.shape[0]), bptrs_t]\n forward_var = viterbivars_t + feat\n backscores[index] = forward_var\n forward_var = forward_var[np.newaxis, :]\n backpointers[index] = bptrs_t\n\n terminal_var = forward_var.squeeze() + transitions[id_stop]\n terminal_var[id_stop] = -10000.0\n terminal_var[id_start] = -10000.0\n best_tag_id = terminal_var.argmax()\n\n best_path = [best_tag_id]\n for bptrs_t in reversed(backpointers):\n best_tag_id = bptrs_t[best_tag_id]\n best_path.append(best_tag_id)\n\n start = best_path.pop()\n assert start == id_start\n best_path.reverse()\n\n best_scores_softmax = self._softmax(backscores, axis=1)\n best_scores_np = np.max(best_scores_softmax, axis=1)\n\n # default value\n all_scores_np = np.zeros(0, dtype=np.float64)\n if all_scores:\n all_scores_np = best_scores_softmax\n for index, (tag_id, tag_scores) in enumerate(zip(best_path, all_scores_np)):\n if type(tag_id) != int and tag_id.item() != tag_scores.argmax():\n swap_index_score = tag_scores.argmax()\n (\n all_scores_np[index][tag_id.item()],\n all_scores_np[index][swap_index_score],\n ) = (\n all_scores_np[index][swap_index_score],\n all_scores_np[index][tag_id.item()],\n )\n elif type(tag_id) == int and tag_id != tag_scores.argmax():\n swap_index_score = tag_scores.argmax()\n (\n all_scores_np[index][tag_id],\n all_scores_np[index][swap_index_score],\n ) = (\n all_scores_np[index][swap_index_score],\n all_scores_np[index][tag_id],\n )\n\n return best_scores_np.tolist(), best_path, all_scores_np.tolist()\n\n def _forward_alg(self, feats, lens_):\n\n init_alphas = torch.FloatTensor(self.tagset_size).fill_(-10000.0)\n init_alphas[self.tag_dictionary.get_idx_for_item(START_TAG)] = 0.0\n\n forward_var = torch.zeros(\n feats.shape[0],\n feats.shape[1] + 1,\n feats.shape[2],\n dtype=torch.float,\n device=flair.device,\n )\n\n forward_var[:, 0, :] = init_alphas[None, :].repeat(feats.shape[0], 1)\n\n transitions = self.transitions.view(\n 1, self.transitions.shape[0], self.transitions.shape[1]\n ).repeat(feats.shape[0], 1, 1)\n\n for i in range(feats.shape[1]):\n emit_score = feats[:, i, :]\n\n tag_var = (\n emit_score[:, :, None].repeat(1, 1, transitions.shape[2])\n + transitions\n + forward_var[:, i, :][:, :, None]\n .repeat(1, 1, transitions.shape[2])\n .transpose(2, 1)\n )\n\n max_tag_var, _ = torch.max(tag_var, dim=2)\n\n tag_var = tag_var - max_tag_var[:, :, None].repeat(\n 1, 1, transitions.shape[2]\n )\n\n agg_ = torch.log(torch.sum(torch.exp(tag_var), dim=2))\n\n cloned = forward_var.clone()\n cloned[:, i + 1, :] = max_tag_var + agg_\n\n forward_var = cloned\n\n forward_var = forward_var[range(forward_var.shape[0]), lens_, :]\n\n terminal_var = forward_var + self.transitions[\n self.tag_dictionary.get_idx_for_item(STOP_TAG)\n ][None, :].repeat(forward_var.shape[0], 1)\n\n alpha = log_sum_exp_batch(terminal_var)\n\n return alpha\n\n @staticmethod\n def _filter_empty_sentences(sentences: List[Sentence]) -> List[Sentence]:\n filtered_sentences = [sentence for sentence in sentences if sentence.tokens]\n if len(sentences) != len(filtered_sentences):\n log.warning(\n f\"Ignore {len(sentences) 
- len(filtered_sentences)} sentence(s) with no tokens.\"\n )\n return filtered_sentences\n\n @staticmethod\n def _filter_empty_string(texts: List[str]) -> List[str]:\n filtered_texts = [text for text in texts if text]\n if len(texts) != len(filtered_texts):\n log.warning(\n f\"Ignore {len(texts) - len(filtered_texts)} string(s) with no tokens.\"\n )\n return filtered_texts\n\n @staticmethod\n def _fetch_model(model_name) -> str:\n\n model_map = {}\n\n aws_resource_path_v04 = (\n \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/models-v0.4\"\n )\n\n model_map[\"ner\"] = \"/\".join(\n [aws_resource_path_v04, \"NER-conll03-english\", \"en-ner-conll03-v0.4.pt\"]\n )\n\n model_map[\"ner-fast\"] = \"/\".join(\n [\n aws_resource_path_v04,\n \"NER-conll03--h256-l1-b32-p3-0.5-%2Bglove%2Bnews-forward-fast%2Bnews-backward-fast-normal-locked0.5-word0.05--release_4\",\n \"en-ner-fast-conll03-v0.4.pt\",\n ]\n )\n\n model_map[\"ner-ontonotes\"] = \"/\".join(\n [\n aws_resource_path_v04,\n \"release-ner-ontonotes-0\",\n \"en-ner-ontonotes-v0.4.pt\",\n ]\n )\n\n model_map[\"ner-ontonotes-fast\"] = \"/\".join(\n [\n aws_resource_path_v04,\n \"release-ner-ontonotes-fast-0\",\n \"en-ner-ontonotes-fast-v0.4.pt\",\n ]\n )\n\n for key in [\"ner-multi\", \"multi-ner\"]:\n model_map[key] = \"/\".join(\n [\n aws_resource_path_v04,\n \"release-quadner-512-l2-multi-embed\",\n \"quadner-large.pt\",\n ]\n )\n\n for key in [\"ner-multi-fast\", \"multi-ner-fast\"]:\n model_map[key] = \"/\".join(\n [aws_resource_path_v04, \"NER-multi-fast\", \"ner-multi-fast.pt\"]\n )\n\n for key in [\"ner-multi-fast-learn\", \"multi-ner-fast-learn\"]:\n model_map[key] = \"/\".join(\n [\n aws_resource_path_v04,\n \"NER-multi-fast-evolve\",\n \"ner-multi-fast-learn.pt\",\n ]\n )\n\n model_map[\"pos\"] = \"/\".join(\n [\n aws_resource_path_v04,\n \"POS-ontonotes--h256-l1-b32-p3-0.5-%2Bglove%2Bnews-forward%2Bnews-backward-normal-locked0.5-word0.05--v0.4_0\",\n \"en-pos-ontonotes-v0.4.pt\",\n ]\n )\n\n model_map[\"pos-fast\"] = \"/\".join(\n [\n aws_resource_path_v04,\n \"release-pos-fast-0\",\n \"en-pos-ontonotes-fast-v0.4.pt\",\n ]\n )\n\n for key in [\"pos-multi\", \"multi-pos\"]:\n model_map[key] = \"/\".join(\n [\n aws_resource_path_v04,\n \"release-dodekapos-512-l2-multi\",\n \"pos-multi-v0.1.pt\",\n ]\n )\n\n for key in [\"pos-multi-fast\", \"multi-pos-fast\"]:\n model_map[key] = \"/\".join(\n [aws_resource_path_v04, \"UPOS-multi-fast\", \"pos-multi-fast.pt\"]\n )\n\n model_map[\"frame\"] = \"/\".join(\n [aws_resource_path_v04, \"release-frame-1\", \"en-frame-ontonotes-v0.4.pt\"]\n )\n\n model_map[\"frame-fast\"] = \"/\".join(\n [\n aws_resource_path_v04,\n \"release-frame-fast-0\",\n \"en-frame-ontonotes-fast-v0.4.pt\",\n ]\n )\n\n model_map[\"chunk\"] = \"/\".join(\n [\n aws_resource_path_v04,\n \"NP-conll2000--h256-l1-b32-p3-0.5-%2Bnews-forward%2Bnews-backward-normal-locked0.5-word0.05--v0.4_0\",\n \"en-chunk-conll2000-v0.4.pt\",\n ]\n )\n\n model_map[\"chunk-fast\"] = \"/\".join(\n [\n aws_resource_path_v04,\n \"release-chunk-fast-0\",\n \"en-chunk-conll2000-fast-v0.4.pt\",\n ]\n )\n\n model_map[\"da-pos\"] = \"/\".join(\n [aws_resource_path_v04, \"POS-danish\", \"da-pos-v0.1.pt\"]\n )\n\n model_map[\"da-ner\"] = \"/\".join(\n [aws_resource_path_v04, \"NER-danish\", \"da-ner-v0.1.pt\"]\n )\n\n model_map[\"de-pos\"] = \"/\".join(\n [aws_resource_path_v04, \"release-de-pos-0\", \"de-pos-ud-hdt-v0.4.pt\"]\n )\n\n model_map[\"de-pos-fine-grained\"] = \"/\".join(\n [\n aws_resource_path_v04,\n 
\"POS-fine-grained-german-tweets\",\n \"de-pos-twitter-v0.1.pt\",\n ]\n )\n\n model_map[\"de-ner\"] = \"/\".join(\n [aws_resource_path_v04, \"release-de-ner-0\", \"de-ner-conll03-v0.4.pt\"]\n )\n\n model_map[\"de-ner-germeval\"] = \"/\".join(\n [aws_resource_path_v04, \"NER-germeval\", \"de-ner-germeval-0.4.1.pt\"]\n )\n\n model_map[\"fr-ner\"] = \"/\".join(\n [aws_resource_path_v04, \"release-fr-ner-0\", \"fr-ner-wikiner-0.4.pt\"]\n )\n model_map[\"nl-ner\"] = \"/\".join(\n [aws_resource_path_v04, \"NER-conll2002-dutch\", \"nl-ner-conll02-v0.1.pt\"]\n )\n\n cache_dir = Path(\"models\")\n if model_name in model_map:\n model_name = cached_path(model_map[model_name], cache_dir=cache_dir)\n\n return model_name\n\n def get_transition_matrix(self):\n data = []\n for to_idx, row in enumerate(self.transitions):\n for from_idx, column in enumerate(row):\n row = [\n self.tag_dictionary.get_item_for_index(from_idx),\n self.tag_dictionary.get_item_for_index(to_idx),\n column.item(),\n ]\n data.append(row)\n data.append([\"----\"])\n print(tabulate(data, headers=[\"FROM\", \"TO\", \"SCORE\"]))\n"
] |
[
[
"torch.nn.functional.softmax",
"torch.max",
"torch.cat",
"torch.zeros",
"torch.utils.data.DataLoader",
"torch.sum",
"numpy.max",
"torch.nn.utils.rnn.pad_packed_sequence",
"torch.no_grad",
"torch.FloatTensor",
"numpy.exp",
"torch.nn.Dropout",
"torch.randn",
"numpy.arange",
"torch.nn.utils.rnn.pack_padded_sequence",
"torch.tensor",
"numpy.repeat",
"numpy.zeros",
"torch.exp",
"torch.nn.Linear",
"torch.nn.functional.cross_entropy",
"numpy.empty"
]
] |
chao-tan/FORECAST-CLSTM
|
[
"79a604717e9c6a87976c7ec274c4ed98461df4ef"
] |
[
"tools.py"
] |
[
"# -*- coding: utf-8 -*-\r\nimport torch.nn as nn\r\nimport torch\r\n\r\n\r\nclass ConvLSTMCell(nn.Module):\r\n def __init__(self, input_size,input_dim,hidden_dim,kernel_size,bias):\r\n super(ConvLSTMCell, self).__init__()\r\n\r\n self.height, self.width = input_size\r\n self.input_dim = input_dim\r\n self.hidden_dim = hidden_dim\r\n self.kernel_size = kernel_size\r\n self.padding = (kernel_size[0]//2, kernel_size[1]//2)\r\n self.bias = bias\r\n\r\n self.conv = nn.Conv2d(in_channels=self.input_dim + self.hidden_dim,\r\n out_channels=4 * self.hidden_dim,\r\n kernel_size=self.kernel_size,\r\n padding=self.padding,\r\n bias=self.bias)\r\n\r\n\r\n def forward(self, input_tensor, cur_state):\r\n h_cur, c_cur = cur_state\r\n combined = torch.cat([input_tensor, h_cur], dim=1)\r\n\r\n combined_conv = self.conv(combined)\r\n cc_i, cc_f, cc_o, cc_g = torch.split(combined_conv, self.hidden_dim, dim=1)\r\n i = torch.sigmoid(cc_i)\r\n f = torch.sigmoid(cc_f)\r\n o = torch.sigmoid(cc_o)\r\n g = torch.tanh(cc_g)\r\n\r\n c_next = f * c_cur + i * g\r\n h_next = o * torch.tanh(c_next)\r\n\r\n return h_next, c_next\r\n\r\n def init_hidden(self, batch_size):\r\n if torch.cuda.is_available():\r\n return (torch.zeros(batch_size, self.hidden_dim, self.height, self.width).cuda(),\r\n torch.zeros(batch_size, self.hidden_dim, self.height, self.width).cuda())\r\n else:\r\n return (torch.zeros(batch_size, self.hidden_dim, self.height, self.width),\r\n torch.zeros(batch_size, self.hidden_dim, self.height, self.width))\r\n\r\n\r\n\r\nclass ConvLSTM(nn.Module):\r\n\r\n def __init__(self, input_size, # exp:(200,200)\r\n input_dim, # exp:1\r\n hidden_dim, # exp:32\r\n kernel_size, # exp:(3,3)\r\n return_one=False,\r\n bias=True):\r\n\r\n super(ConvLSTM, self).__init__()\r\n self.height, self.width = input_size\r\n self.input_dim = input_dim\r\n self.hidden_dim = hidden_dim\r\n self.kernel_size = kernel_size\r\n self.use_gpu = torch.cuda.is_available()\r\n self.return_one = return_one\r\n self.bias = bias\r\n\r\n self.cell = ConvLSTMCell(input_size=(self.height,self.width),\r\n input_dim=self.input_dim,\r\n hidden_dim=self.hidden_dim,\r\n kernel_size=self.kernel_size,\r\n bias=self.bias)\r\n\r\n\r\n # input_tensor with shape (batchsize,steps,channels,height,width)\r\n # input hidden_state is [h,c] list\r\n # for h and c with shape (batchsize,out_channels,height,width)\r\n # if return_one = True, return (batchsize,channels,height,width)\r\n # if return_one = False, return (batchsize,steps,channels,height,width)\r\n def forward(self, input_tensor,\r\n hidden_state=None):\r\n\r\n if hidden_state is None:\r\n hidden_state = self._init_hidden(batch_size=input_tensor.size(0))\r\n\r\n seq_len = input_tensor.size(1)\r\n\r\n h, c = hidden_state\r\n output_inner = []\r\n for t in range(seq_len):\r\n h, c = self.cell(input_tensor=input_tensor[:, t, :, :, :],\r\n cur_state=[h, c])\r\n output_inner.append(h)\r\n\r\n layer_output = torch.stack(output_inner, dim=1)\r\n\r\n if self.return_one:\r\n return layer_output[:,-1,:,:,:],[h,c]\r\n else:\r\n return layer_output, [h,c]\r\n\r\n\r\n def _init_hidden(self, batch_size):\r\n return self.cell.init_hidden(batch_size)\r\n\r\n\r\n\r\n\r\n\r\n\r\nclass FSCONV2D(nn.Module):\r\n def __init__(self,in_channels, #Input channels of the samples\r\n out_channels, #Output channels of the samples\r\n kernel_size, #Kernel size of the convolution operation\r\n stride, #Stride of the convolution operation\r\n padding, #Padding of the convolution operation\r\n bias=True): #If use bias for every 
kernel\r\n\r\n super(FSCONV2D,self).__init__()\r\n self.input_channels = in_channels\r\n self.out_channels = out_channels\r\n self.kernel_size = kernel_size\r\n self.stride = stride\r\n self.padding = padding\r\n self.bias = bias\r\n\r\n self.conv = nn.Conv2d(in_channels=self.input_channels,\r\n out_channels=self.out_channels,\r\n kernel_size=self.kernel_size,\r\n stride=self.stride,\r\n padding=self.padding,\r\n bias=self.bias)\r\n\r\n def forward(self, x): # input x with shape:(batchsize,steps,channels,width,height)\r\n\r\n x_split = torch.split(x,1,dim=1)\r\n out =[]\r\n for i in range(len(x_split)):\r\n out.append(self.conv(x_split[i].squeeze(dim=1)))\r\n\r\n # output with shape:(batchsize,steps,channels,width,height)\r\n return torch.stack(out,dim=1)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nclass FSDCONV2D(nn.Module):\r\n def __init__(self,in_channels, #Input channels of the samples\r\n out_channels, #Output channels of the samples\r\n kernel_size, #Kernel size of the convolution operation\r\n stride, #Stride of the convolution operation\r\n padding, #Padding of the convolution operation\r\n bias=True): #If use bias for every kernel\r\n\r\n super(FSDCONV2D,self).__init__()\r\n self.input_channels = in_channels\r\n self.out_channels = out_channels\r\n self.kernel_size = kernel_size\r\n self.stride = stride\r\n self.padding = padding\r\n self.bias = bias\r\n\r\n self.conv = nn.ConvTranspose2d(in_channels=self.input_channels,\r\n out_channels=self.out_channels,\r\n kernel_size=self.kernel_size,\r\n stride=self.stride,\r\n padding=self.padding,\r\n bias=self.bias)\r\n\r\n def forward(self, x): # input x with shape:(batchsize,steps,channels,width,height)\r\n\r\n x_split = torch.split(x,1,dim=1)\r\n out =[]\r\n for i in range(len(x_split)):\r\n out.append(self.conv(x_split[i].squeeze(dim=1)))\r\n\r\n # output with shape:(batchsize,steps,channels,width,height)\r\n return torch.stack(out,dim=1)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\"The Implement of First Seprate Pooling Network\"\r\nclass FSPOOL2D(nn.Module):\r\n def __init__(self, kernel_size=(2,2),\r\n stride=(2,2)):\r\n super(FSPOOL2D, self).__init__()\r\n self.kernel_size = kernel_size\r\n self.stride = stride\r\n\r\n self.pooling = nn.MaxPool2d(kernel_size=self.kernel_size,\r\n stride=self.stride,\r\n return_indices=True)\r\n\r\n def forward(self, x): # input x with shape:(batchsize,steps,channels,width,height)\r\n x_split = torch.split(x, 1, dim=1)\r\n out = []\r\n ind = []\r\n for i in range(len(x_split)):\r\n c, indx = self.pooling(x_split[i].squeeze(dim=1))\r\n out.append(c)\r\n ind.append(indx)\r\n\r\n # output with shape:(batchsize,steps,channels,width,height)\r\n return torch.stack(out, dim=1), ind\r\n\r\n\r\n\r\nclass FSUNPOOLING(nn.Module):\r\n def __init__(self,kernel_size=(2,2)):\r\n super(FSUNPOOLING,self).__init__()\r\n self.kernel_size = kernel_size\r\n\r\n self.unpooling = nn.MaxUnpool2d(kernel_size=self.kernel_size)\r\n\r\n def forward(self, x,ind):\r\n x_split = torch.split(x,1,dim=1)\r\n out=[]\r\n for i in range(len(x_split)):\r\n out.append(self.unpooling(x_split[i].squeeze(1),ind))\r\n\r\n return torch.stack(out,dim=1)\r\n\r\n\r\nclass FORECASTER_LOSS(nn.Module):\r\n def __init__(self):\r\n super(FORECASTER_LOSS,self).__init__()\r\n\r\n def forward(self, output,ground):\r\n output = output.view(-1)\r\n ground = ground.view(-1)\r\n gap = torch.abs(output-ground)\r\n weight = (output+ground-gap)/2\r\n weight = 1-weight/255.0\r\n weight = torch.exp(weight)\r\n loss = torch.mean(weight*(output-ground)*(output-ground))\r\n return loss\r\n"
] |
[
[
"torch.abs",
"torch.sigmoid",
"torch.mean",
"torch.nn.ConvTranspose2d",
"torch.cat",
"torch.nn.MaxUnpool2d",
"torch.zeros",
"torch.nn.Conv2d",
"torch.tanh",
"torch.nn.MaxPool2d",
"torch.exp",
"torch.cuda.is_available",
"torch.split",
"torch.stack"
]
] |
waveletgap/wgap
|
[
"564423507747cc31c99e7bb01d46f869574a845d"
] |
[
"splitDataset.py"
] |
[
"\"\"\"\nScript that splits the dataset by ratio\n\"\"\"\n\nimport shutil\nimport os\nimport numpy as np\nimport argparse\n\ndef get_files_from_folder(path):\n\n files = os.listdir(path)\n return np.asarray(files)\n\ndef main(path_to_data, path_to_test_data, train_ratio):\n # get dirs\n _, dirs, _ = next(os.walk(path_to_data))\n\n # calculates how many train data per class\n data_counter_per_class = np.zeros((len(dirs)))\n for i in range(len(dirs)):\n path = os.path.join(path_to_data, dirs[i])\n files = get_files_from_folder(path)\n data_counter_per_class[i] = len(files)\n test_counter = np.round(data_counter_per_class * (1 - train_ratio))\n\n # transfers files\n for i in range(len(dirs)):\n path_to_original = os.path.join(path_to_data, dirs[i])\n path_to_save = os.path.join(path_to_test_data, dirs[i])\n\n #creates dir\n if not os.path.exists(path_to_save):\n os.makedirs(path_to_save)\n files = get_files_from_folder(path_to_original)\n # moves data\n for j in range(int(test_counter[i])):\n dst = os.path.join(path_to_save, files[j])\n src = os.path.join(path_to_original, files[j])\n shutil.move(src, dst)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\"Dataset divider\")\n parser.add_argument(\"--data_path\", required=True,\n help=\"Path to data\")\n parser.add_argument(\"--test_data_path_to_save\", required=True,\n help=\"Path to test data where to save\")\n parser.add_argument(\"--train_ratio\", required=True,\n help=\"Train ratio - 0.7 means splitting data in 70 % train and 30 % test\")\n return parser.parse_args()\n\nif __name__ == \"__main__\":\n args = parse_args()\n main(args.data_path, args.test_data_path_to_save, float(args.train_ratio))\n"
] |
[
[
"numpy.asarray",
"numpy.round"
]
] |
BerenLuthien/ReAgent
|
[
"52f666670a7fa03206812ef48949f6b934d400f7"
] |
[
"reagent/preprocessing/transforms.py"
] |
[
"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n\nimport logging\nfrom typing import Callable, List, Optional\n\nimport numpy as np\nimport reagent.core.types as rlt\nimport torch\nimport torch.nn.functional as F\nfrom reagent.core.parameters import NormalizationData\nfrom reagent.preprocessing.preprocessor import Preprocessor\nfrom reagent.preprocessing.sparse_preprocessor import make_sparse_preprocessor\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Compose:\n \"\"\"\n Applies an iterable collection of transform functions\n \"\"\"\n\n def __init__(self, *transforms):\n self.transforms = transforms\n\n def __call__(self, data):\n for t in self.transforms:\n data = t(data)\n return data\n\n def __repr__(self):\n transforms = \"\\n \".join([repr(t) for t in self.transforms])\n return f\"{self.__class__.__name__}(\\n{transforms}\\n)\"\n\n\n# TODO: this wouldn't work for possible_actions_mask (list of value, presence)\nclass ValuePresence:\n \"\"\"\n For every key `x`, looks for `x_presence`; if `x_presence` exists,\n replace `x` with tuple of `x` and `x_presence`, delete `x_presence` key\n \"\"\"\n\n def __call__(self, data):\n keys = list(data.keys())\n\n for k in keys:\n presence_key = f\"{k}_presence\"\n if presence_key in data:\n data[k] = (data[k], data[presence_key])\n del data[presence_key]\n\n return data\n\n\nclass Lambda:\n \"\"\"Applies an arbitrary callable transform\"\"\"\n\n def __init__(self, keys: List[str], fn: Callable):\n self.keys = keys\n self.fn = fn\n\n def __call__(self, data):\n for k in self.keys:\n data[k] = self.fn(data[k])\n return data\n\n\nclass SelectValuePresenceColumns:\n \"\"\"\n Select columns from value-presence source key\n \"\"\"\n\n def __init__(self, source: str, dest: str, indices: List[int]):\n self.source = source\n self.dest = dest\n self.indices = indices\n\n def __call__(self, data):\n value, presence = data[self.source]\n data[self.dest] = (value[:, self.indices], presence[:, self.indices])\n return data\n\n\nclass DenseNormalization:\n \"\"\"\n Normalize the `keys` using `normalization_data`.\n The keys are expected to be `Tuple[torch.Tensor, torch.Tensor]`,\n where the first element is the value and the second element is the\n presence mask.\n This transform replaces the keys in the input data.\n \"\"\"\n\n def __init__(\n self,\n keys: List[str],\n normalization_data: NormalizationData,\n device: Optional[torch.device] = None,\n ):\n \"\"\"\n Args:\n keys: the name of the keys to be transformed\n \"\"\"\n self.keys = keys\n self.normalization_data = normalization_data\n self.device = device or torch.device(\"cpu\")\n # Delay the initialization of the preprocessor so this class\n # is pickleable\n self._preprocessor: Optional[Preprocessor] = None\n\n def __call__(self, data):\n if self._preprocessor is None:\n self._preprocessor = Preprocessor(\n self.normalization_data.dense_normalization_parameters,\n device=self.device,\n )\n\n for k in self.keys:\n value, presence = data[k]\n value, presence = value.to(self.device), presence.to(self.device)\n presence[torch.isnan(value)] = 0\n value[torch.isnan(value)] = 0\n data[k] = self._preprocessor(value, presence).float()\n\n return data\n\n\nclass MapIDListFeatures:\n \"\"\"\n Applies a SparsePreprocessor (see sparse_preprocessor.SparsePreprocessor)\n \"\"\"\n\n def __init__(\n self,\n id_list_keys: List[str],\n id_score_list_keys: List[str],\n feature_config: rlt.ModelFeatureConfig,\n device: torch.device,\n ):\n self.id_list_keys = 
id_list_keys\n self.id_score_list_keys = id_score_list_keys\n assert (\n set(id_list_keys).intersection(set(id_score_list_keys)) == set()\n ), f\"id_list_keys: {id_list_keys}; id_score_list_keys: {id_score_list_keys}\"\n self.feature_config = feature_config\n self.sparse_preprocessor = make_sparse_preprocessor(\n feature_config=feature_config, device=device\n )\n\n def __call__(self, data):\n for k in self.id_list_keys + self.id_score_list_keys:\n # if no ids, it means we're not using sparse features.\n if not self.feature_config.id2name or k not in data:\n data[k] = None\n continue\n\n assert isinstance(data[k], dict), f\"{k} has type {type(data[k])}. {data[k]}\"\n if k in self.id_list_keys:\n data[k] = self.sparse_preprocessor.preprocess_id_list(data[k])\n else:\n data[k] = self.sparse_preprocessor.preprocess_id_score_list(data[k])\n return data\n\n\nclass OneHotActions:\n \"\"\"\n Keys should be in the set {0,1,2,...,num_actions}, where\n a value equal to num_actions denotes that it's not valid.\n \"\"\"\n\n def __init__(self, keys: List[str], num_actions: int):\n self.keys = keys\n self.num_actions = num_actions\n\n def __call__(self, data):\n for k in self.keys:\n # we do + 1 and then index up to n because value could be num_actions,\n # in which case the result is a zero-vector\n data[k] = F.one_hot(data[k], self.num_actions + 1).index_select(\n -1, torch.arange(self.num_actions)\n )\n return data\n\n\nclass ColumnVector:\n \"\"\"\n Ensure that the keys are column vectors\n \"\"\"\n\n def __init__(self, keys: List[str]):\n self.keys = keys\n\n def __call__(self, data):\n for k in self.keys:\n raw_value = data[k]\n if isinstance(raw_value, tuple):\n value, _presence = raw_value\n elif isinstance(raw_value, list):\n # TODO(T67265031): make mdp_id a tensor, which we will be able to\n # when column type changes to int\n value = np.array(raw_value)\n elif isinstance(raw_value, torch.Tensor):\n # TODO(T67265031): this is an identity mapping, which is only necessary\n # when mdp_id in traced batch preprocessors becomes a tensor (mdp_id\n # is a list of strings in normal batch preprocessors).\n value = raw_value\n else:\n raise NotImplementedError(f\"value of type {type(raw_value)}.\")\n\n assert value.ndim == 1 or (\n value.ndim == 2 and value.shape[1] == 1\n ), f\"Invalid shape for key {k}: {value.shape}\"\n data[k] = value.reshape(-1, 1)\n\n return data\n\n\nclass MaskByPresence:\n \"\"\"\n Expect data to be (value, presence) and return value * presence.\n This zeros out values that aren't present.\n \"\"\"\n\n def __init__(self, keys: List[str]):\n self.keys = keys\n\n def __call__(self, data):\n for k in self.keys:\n value_presence = data[k]\n assert (\n isinstance(value_presence, tuple) and len(value_presence) == 2\n ), f\"Not valid value, presence tuple: {value_presence}\"\n value, presence = value_presence\n assert value.shape == presence.shape, (\n f\"Unmatching value shape ({value.shape})\"\n f\" and presence shape ({presence.shape})\"\n )\n data[k] = value * presence.float()\n\n return data\n\n\nclass StackDenseFixedSizeArray:\n \"\"\"\n If data is a tensor, ensures it has the correct shape. 
If data is a list of\n (value, presence) discards the presence tensors and concatenates the values\n to output a tensor of shape (batch_size, feature_dim).\n \"\"\"\n\n def __init__(self, keys: List[str], size: int, dtype=torch.float):\n self.keys = keys\n self.size = size\n self.dtype = dtype\n\n def __call__(self, data):\n for k in self.keys:\n value = data[k]\n if isinstance(value, torch.Tensor):\n # Just ensure the shape\n if not (value.ndim == 2 and value.shape[1] == self.size):\n raise ValueError(f\"Wrong shape for key {k}: {value.shape}\")\n data[k] = value.to(self.dtype)\n else:\n # Assuming that value is List[Tuple[torch.Tensor, torch.Tensor]]\n data[k] = (\n torch.cat([v for v, p in value], dim=0)\n .view(-1, self.size)\n .to(dtype=self.dtype)\n )\n return data\n\n\nclass FixedLengthSequences:\n \"\"\"\n Does two things:\n 1. makes sure each sequence in the list of keys has the expected fixed length\n 2. if to_keys is provided, copies the relevant sequence_id to the new key,\n otherwise overwrites the old key\n\n Expects each data[key] to be `Dict[Int, Tuple[Tensor, T]]`. Where:\n - key is the feature id\n - sequence_id is the key of the dict data[key]\n - The first element of the tuple is the offset for each example, which is expected to be in fixed interval.\n - The second element is the data at each step in the sequence\n\n This is mainly for FB internal use,\n see fbcode/caffe2/caffe2/fb/proto/io_metadata.thrift\n for the data format extracted from SequenceFeatureMetadata\n\n NOTE: this is not product between two lists (keys and to_keys);\n it's setting keys[sequence_id] to to_keys in a parallel way\n \"\"\"\n\n def __init__(\n self,\n keys: List[str],\n sequence_id: int,\n expected_length: Optional[int] = None,\n *,\n to_keys: Optional[List[str]] = None,\n ):\n self.keys = keys\n self.sequence_id = sequence_id\n self.to_keys = to_keys or keys\n assert len(self.to_keys) == len(keys)\n self.expected_length = expected_length\n\n def __call__(self, data):\n for key, to_key in zip(self.keys, self.to_keys):\n offsets, value = data[key][self.sequence_id]\n # TODO assert regarding offsets length compared to value\n expected_length = self.expected_length\n if expected_length is None:\n if len(offsets) > 1:\n # If batch size is larger than 1, just use the offsets\n expected_length = (offsets[1] - offsets[0]).item()\n else:\n # If batch size is 1\n expected_length = value[0].shape[0]\n self.expected_length = expected_length\n expected_offsets = torch.arange(\n 0, offsets.shape[0] * expected_length, expected_length\n )\n assert all(\n expected_offsets == offsets\n ), f\"Unexpected offsets for {key} {self.sequence_id}: {offsets}. 
Expected {expected_offsets}\"\n\n data[to_key] = value\n return data\n\n\nclass SlateView:\n \"\"\"\n Assuming that the keys are flatten fixed-length sequences with length of\n `slate_size`, unflatten it by inserting `slate_size` to the 1st dim.\n I.e., turns the input from the shape of `[B * slate_size, D]` to\n `[B, slate_size, D]`.\n \"\"\"\n\n def __init__(self, keys: List[str], slate_size: int):\n self.keys = keys\n self.slate_size = slate_size\n\n def __call__(self, data):\n for k in self.keys:\n value = data[k]\n _, dim = value.shape\n data[k] = value.view(-1, self.slate_size, dim)\n\n return data\n\n\nclass FixedLengthSequenceDenseNormalization:\n \"\"\"\n Combines the FixedLengthSequences, DenseNormalization, and SlateView transforms\n \"\"\"\n\n def __init__(\n self,\n keys: List[str],\n sequence_id: int,\n normalization_data: NormalizationData,\n expected_length: Optional[int] = None,\n device: Optional[torch.device] = None,\n ):\n to_keys = [f\"{k}:{sequence_id}\" for k in keys]\n self.fixed_length_sequences = FixedLengthSequences(\n keys, sequence_id, to_keys=to_keys, expected_length=expected_length\n )\n self.dense_normalization = DenseNormalization(\n to_keys, normalization_data, device=device\n )\n # We will override this in __call__()\n self.slate_view = SlateView(to_keys, slate_size=-1)\n\n def __call__(self, data):\n data = self.fixed_length_sequences(data)\n data = self.dense_normalization(data)\n self.slate_view.slate_size = self.fixed_length_sequences.expected_length\n return self.slate_view(data)\n\n\nclass AppendConstant:\n \"\"\"\n Append a column of constant value at the beginning of the specified dimension\n Can be used to add a column of \"1\" to the Linear Regression input data to capture intercept/bias\n \"\"\"\n\n def __init__(self, keys: List[str], dim: int = -1, const: float = 1.0):\n self.keys = keys\n self.dim = dim\n self.const = const\n\n def __call__(self, data):\n for k in self.keys:\n value = data[k]\n extra_col = self.const * torch.ones(value.shape[:-1]).unsqueeze(-1)\n data[k] = torch.cat((extra_col, value), dim=self.dim)\n return data\n\n\nclass UnsqueezeRepeat:\n \"\"\"\n This transform adds an extra dimension to the tensor and repeats\n the tensor along that dimension\n \"\"\"\n\n def __init__(self, keys: List[str], dim: int, num_repeat: int = 1):\n self.keys = keys\n self.dim = dim\n self.num_repeat = num_repeat\n\n def __call__(self, data):\n for k in self.keys:\n data[k] = data[k].unsqueeze(self.dim)\n if self.num_repeat != 1:\n repeat_counters = [1 for _ in range(data[k].ndim)]\n repeat_counters[self.dim] = self.num_repeat\n data[k] = data[k].repeat(*repeat_counters)\n return data\n\n\ndef _get_product_features(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Get outer product of 2 tensors along the last dimension.\n All dimensions except last are preserved. 
The last dimension is replaced\n with flattened outer products of last-dimension-vectors from input tensors\n\n This is a vectorized implementation of (for 2D case):\n for i in range(x.shape[0]):\n out[i, :] = torch.outer(x[i, :], y[i, :]).flatten()\n\n For 2D inputs:\n Input shapes:\n x: (batch, feature_dim_x)\n y: (batch, feature_dim_y)\n Output shape:\n (batch, feature_dim_x*feature_dim_y)\n \"\"\"\n return torch.einsum(\"...i,...j->...ij\", (x, y)).flatten(start_dim=-2)\n\n\nclass OuterProduct:\n \"\"\"\n This transform creates a tensor with an outer product of elements of 2 tensors.\n The outer product is stored under the new key.\n The 2 input tensors might be dropped, depending on input arguments\n \"\"\"\n\n def __init__(\n self,\n key1: str,\n key2: str,\n output_key: str,\n drop_inputs: bool = False,\n ):\n self.key1 = key1\n self.key2 = key2\n self.output_key = output_key\n self.drop_inputs = drop_inputs\n\n def __call__(self, data):\n x = data[self.key1]\n y = data[self.key2]\n prod = _get_product_features(x, y)\n data[self.output_key] = prod\n if self.drop_inputs:\n del data[self.key1], data[self.key2]\n return data\n\n\nclass GetEye:\n \"\"\"\n Place a diagonal tensor into the data dictionary\n \"\"\"\n\n def __init__(self, key: str, size: int):\n self.key = key\n self.size = size\n\n def __call__(self, data):\n x = torch.eye(self.size)\n data[self.key] = x\n return data\n\n\ndef _broadcast_tensors_for_cat(\n tensors: List[torch.Tensor], dim: int\n) -> List[torch.Tensor]:\n \"\"\"\n Broadcast all tensors so that they could be concatenated along the specific dim.\n The tensor shapes have to be broadcastable (after the concatenation dim is taken out)\n\n Example:\n Input tensors of shapes [(10,3,5), (1,3,3)] (dim=2) would get broadcasted to [(10,3,5), (10,3,3)],\n so that they could be concatenated along the last dim.\n \"\"\"\n if dim >= 0:\n dims = [dim] * len(tensors)\n else:\n dims = [t.ndim + dim for t in tensors]\n shapes = [list(t.shape) for t in tensors]\n for s, d in zip(shapes, dims):\n s.pop(d)\n shapes_except_cat_dim = [tuple(s) for s in shapes]\n broadcast_shape = torch.broadcast_shapes(*shapes_except_cat_dim)\n final_shapes = [list(broadcast_shape) for t in tensors]\n for s, t, d in zip(final_shapes, tensors, dims):\n s.insert(d, t.shape[dim])\n final_shapes = [tuple(s) for s in final_shapes]\n return [t.expand(s) for t, s in zip(tensors, final_shapes)]\n\n\nclass Cat:\n \"\"\"\n This transform concatenates the tensors along a specified dim\n \"\"\"\n\n def __init__(\n self, input_keys: List[str], output_key: str, dim: int, broadcast: bool = True\n ):\n self.input_keys = input_keys\n self.output_key = output_key\n self.dim = dim\n self.broadcast = broadcast\n\n def __call__(self, data):\n tensors = []\n for k in self.input_keys:\n tensors.append(data[k])\n if self.broadcast:\n tensors = _broadcast_tensors_for_cat(tensors, self.dim)\n data[self.output_key] = torch.cat(tensors, dim=self.dim)\n return data\n\n\nclass Rename:\n \"\"\"\n Change key names\n \"\"\"\n\n def __init__(self, old_names: List[str], new_names: List[str]):\n self.old_names = old_names\n self.new_names = new_names\n\n def __call__(self, data):\n new_data = dict(data)\n for o, n in zip(self.old_names, self.new_names):\n new_data[n] = new_data.pop(o)\n return new_data\n\n\nclass Filter:\n \"\"\"\n Remove some keys from the dict.\n Can specify keep_keys (they will be kept) or remove_keys (they will be removed)\n \"\"\"\n\n def __init__(\n self,\n *,\n keep_keys: Optional[List[str]] = None,\n 
remove_keys: Optional[List[str]] = None,\n ):\n assert (keep_keys is None) != (remove_keys is None)\n self.keep_keys = keep_keys\n self.remove_keys = remove_keys\n\n def __call__(self, data):\n if self.keep_keys:\n new_data = {}\n for k in self.keep_keys:\n if k in data:\n new_data[k] = data[k]\n else:\n new_data = dict(data)\n for k in self.remove_keys:\n if k in new_data:\n del new_data[k]\n return new_data\n"
] |
[
[
"torch.ones",
"torch.isnan",
"torch.cat",
"torch.einsum",
"torch.nn.functional.one_hot",
"torch.eye",
"torch.broadcast_shapes",
"torch.arange",
"torch.device",
"numpy.array"
]
] |
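A minimal standalone sketch (illustrative only, not part of the dataset record above) of the broadcast-then-concatenate pattern implemented by `_broadcast_tensors_for_cat` and `Cat` in the `transforms.py` entry above; the tensor shapes used here are made up for the example.

```python
import torch

x = torch.randn(10, 3, 5)
y = torch.randn(1, 3, 3)           # broadcastable with x once the cat dim (-1) is removed

# Drop the concatenation dim before computing the common broadcast shape.
shapes = [list(t.shape) for t in (x, y)]
for s in shapes:
    s.pop(-1)
target = torch.broadcast_shapes(*[tuple(s) for s in shapes])

# Expand every tensor to the broadcast shape (keeping its own size on the cat dim), then concatenate.
expanded = [t.expand(*target, t.shape[-1]) for t in (x, y)]
out = torch.cat(expanded, dim=-1)  # shape (10, 3, 8)
print(out.shape)
```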
AndreHenkel/dl_visualisation_comparison
|
[
"6bd1963d671c6d57c7b89ba5b083599725330fa7"
] |
[
"lrp_framework/lrp/functional/utils.py"
] |
[
"import torch\n\n# # # rhos\nidentity_fn = lambda w, b: (w, b)\n\n\ndef gamma_fn(gamma):\n def _gamma_fn(w, b):\n w = w + w * torch.max(torch.tensor(0., device=w.device), w) * gamma\n if b is not None: b = b + b * torch.max(torch.tensor(0., device=b.device), b) * gamma\n return w, b\n return _gamma_fn\n\n\n# # # incrs\nadd_epsilon_fn = lambda e: lambda x: x + ((x > 0).float()*2-1) * e\n\n\n# # # Other stuff\ndef safe_divide(a, b):\n return a / (b + (b == 0).float())\n\ndef normalize(x):\n n_dim = len(x.shape)\n\n # This is what they do in `innvestigate`. Have no idea why?\n # https://github.com/albermax/innvestigate/blob/1ed38a377262236981090bb0989d2e1a6892a0b1/innvestigate/layers.py#L321\n if n_dim == 2: return x\n\n abs = torch.abs(x.view(x.shape[0], -1))\n absmax = torch.max(abs, axis=1)[0].view(x.shape[0], 1)\n for i in range(2, n_dim): absmax = absmax.unsqueeze(-1)\n\n x = safe_divide(x, absmax)\n x = x.clamp(-1, 1)\n\n return x\n"
] |
[
[
"torch.max",
"torch.tensor"
]
] |
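A hedged usage sketch for the LRP `gamma_fn` helper from the `lrp_framework/lrp/functional/utils.py` record above; the function body is copied from the record and the weight values are arbitrary.

```python
import torch

# Copied from the record above: a rho transform that boosts positive weights by a factor gamma.
def gamma_fn(gamma):
    def _gamma_fn(w, b):
        w = w + w * torch.max(torch.tensor(0., device=w.device), w) * gamma
        if b is not None:
            b = b + b * torch.max(torch.tensor(0., device=b.device), b) * gamma
        return w, b
    return _gamma_fn

w = torch.tensor([[-1.0, 2.0], [0.5, -0.25]])
w_rho, _ = gamma_fn(0.25)(w, None)  # negative weights pass through unchanged
print(w_rho)
```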
jonathanfrawley/PyAutoGalaxy
|
[
"55fb44f22ce5490318378dc31596c887d0d2e29b"
] |
[
"autogalaxy/plane/plane.py"
] |
[
"import numpy as np\r\n\r\nfrom autoarray.inversion import inversions as inv\r\nfrom autoarray.inversion import pixelizations as pix\r\nfrom autoarray.structures.arrays import values\r\nfrom autoarray.structures.arrays.two_d import array_2d\r\nfrom autoarray.structures.grids.two_d import grid_2d_irregular\r\nfrom autoarray.structures.grids import grid_decorators\r\nfrom autoarray.structures import visibilities as vis\r\nfrom autogalaxy import exc\r\nfrom autogalaxy import lensing\r\nfrom autogalaxy.galaxy import galaxy as g\r\nfrom autogalaxy.util import plane_util\r\n\r\n\r\nclass AbstractPlane(lensing.LensingObject):\r\n def __init__(self, redshift, galaxies):\r\n \"\"\"A plane of galaxies where all galaxies are at the same redshift.\r\n\r\n Parameters\r\n -----------\r\n redshift : float or None\r\n The redshift of the plane.\r\n galaxies : [Galaxy]\r\n The list of galaxies in this plane.\r\n \"\"\"\r\n\r\n if redshift is None:\r\n\r\n if not galaxies:\r\n raise exc.PlaneException(\r\n \"No redshift and no galaxies were input to a Plane. A redshift for the Plane therefore cannot be\"\r\n \"determined\"\r\n )\r\n elif not all(\r\n [galaxies[0].redshift == galaxy.redshift for galaxy in galaxies]\r\n ):\r\n redshift = np.mean([galaxy.redshift for galaxy in galaxies])\r\n else:\r\n redshift = galaxies[0].redshift\r\n\r\n self.redshift = redshift\r\n self.galaxies = galaxies\r\n\r\n @property\r\n def galaxy_redshifts(self):\r\n return [galaxy.redshift for galaxy in self.galaxies]\r\n\r\n @property\r\n def has_light_profile(self):\r\n if self.galaxies is not None:\r\n return any(\r\n list(map(lambda galaxy: galaxy.has_light_profile, self.galaxies))\r\n )\r\n\r\n @property\r\n def has_mass_profile(self):\r\n if self.galaxies is not None:\r\n return any(list(map(lambda galaxy: galaxy.has_mass_profile, self.galaxies)))\r\n\r\n @property\r\n def has_pixelization(self):\r\n return any([galaxy.pixelization for galaxy in self.galaxies])\r\n\r\n @property\r\n def has_regularization(self):\r\n return any([galaxy.regularization for galaxy in self.galaxies])\r\n\r\n @property\r\n def galaxies_with_light_profile(self):\r\n return list(filter(lambda galaxy: galaxy.has_light_profile, self.galaxies))\r\n\r\n @property\r\n def galaxies_with_mass_profile(self):\r\n return list(filter(lambda galaxy: galaxy.has_mass_profile, self.galaxies))\r\n\r\n @property\r\n def galaxies_with_pixelization(self):\r\n return list(filter(lambda galaxy: galaxy.has_pixelization, self.galaxies))\r\n\r\n @property\r\n def galaxies_with_regularization(self):\r\n return list(filter(lambda galaxy: galaxy.has_regularization, self.galaxies))\r\n\r\n @property\r\n def pixelization(self):\r\n\r\n if len(self.galaxies_with_pixelization) == 0:\r\n return None\r\n if len(self.galaxies_with_pixelization) == 1:\r\n return self.galaxies_with_pixelization[0].pixelization\r\n elif len(self.galaxies_with_pixelization) > 1:\r\n raise exc.PixelizationException(\r\n \"The number of galaxies with pixelizations in one plane is above 1\"\r\n )\r\n\r\n @property\r\n def regularization(self):\r\n\r\n if len(self.galaxies_with_regularization) == 0:\r\n return None\r\n if len(self.galaxies_with_regularization) == 1:\r\n return self.galaxies_with_regularization[0].regularization\r\n elif len(self.galaxies_with_regularization) > 1:\r\n raise exc.PixelizationException(\r\n \"The number of galaxies with regularizations in one plane is above 1\"\r\n )\r\n\r\n @property\r\n def hyper_galaxy_image_of_galaxy_with_pixelization(self):\r\n galaxies_with_pixelization 
= self.galaxies_with_pixelization\r\n if galaxies_with_pixelization:\r\n return galaxies_with_pixelization[0].hyper_galaxy_image\r\n\r\n @property\r\n def has_hyper_galaxy(self):\r\n return any(list(map(lambda galaxy: galaxy.has_hyper_galaxy, self.galaxies)))\r\n\r\n @property\r\n def point_dict(self):\r\n\r\n point_dict = {}\r\n\r\n for galaxy in self.galaxies:\r\n for key, value in galaxy.point_dict.items():\r\n point_dict[key] = value\r\n\r\n return point_dict\r\n\r\n @property\r\n def mass_profiles(self):\r\n return [\r\n item\r\n for mass_profile in self.mass_profiles_of_galaxies\r\n for item in mass_profile\r\n ]\r\n\r\n @property\r\n def mass_profiles_of_galaxies(self):\r\n return [\r\n galaxy.mass_profiles for galaxy in self.galaxies if galaxy.has_mass_profile\r\n ]\r\n\r\n def extract_attribute(self, cls, attr_name):\r\n \"\"\"\r\n Returns an attribute of a class in `Plane` as a `ValueIrregular` or `Grid2DIrregular` object.\r\n\r\n For example, if a plane has a galaxy which two light profiles and we want its axis-ratios, the following:\r\n\r\n `plane.extract_attribute(cls=LightProfile, name=\"axis_ratio\")`\r\n\r\n would return:\r\n\r\n ValuesIrregular(values=[axis_ratio_0, axis_ratio_1])\r\n\r\n If a galaxy has three mass profiles and we want their centres, the following:\r\n\r\n `plane.extract_attribute(cls=MassProfile, name=\"centres\")`\r\n\r\n would return:\r\n\r\n GridIrregular2D(grid=[(centre_y_0, centre_x_0), (centre_y_1, centre_x_1), (centre_y_2, centre_x_2)])\r\n\r\n This is used for visualization, for example plotting the centres of all mass profiles colored by their profile.\r\n \"\"\"\r\n\r\n def extract(value, name):\r\n\r\n try:\r\n return getattr(value, name)\r\n except (AttributeError, IndexError):\r\n return None\r\n\r\n attributes = [\r\n extract(value, attr_name)\r\n for galaxy in self.galaxies\r\n for value in galaxy.__dict__.values()\r\n if isinstance(value, cls)\r\n ]\r\n\r\n if attributes == []:\r\n return None\r\n elif isinstance(attributes[0], float):\r\n return values.ValuesIrregular(values=attributes)\r\n elif isinstance(attributes[0], tuple):\r\n return grid_2d_irregular.Grid2DIrregular(grid=attributes)\r\n\r\n def extract_attributes_of_galaxies(self, cls, attr_name, filter_nones=False):\r\n \"\"\"\r\n Returns an attribute of a class in the plane as a list of `ValueIrregular` or `Grid2DIrregular` objects,\r\n where the list indexes correspond to each galaxy in the plane..\r\n\r\n For example, if a plane has two galaxies which each have a light profile the following:\r\n\r\n `plane.extract_attributes_of_galaxies(cls=LightProfile, name=\"axis_ratio\")`\r\n\r\n would return:\r\n\r\n [ValuesIrregular(values=[axis_ratio_0]), ValuesIrregular(values=[axis_ratio_1])]\r\n\r\n If a plane has two galaxies, the first with a mass profile and the second with two mass profiles ,the following:\r\n\r\n `plane.extract_attributes_of_galaxies(cls=MassProfile, name=\"centres\")`\r\n\r\n would return:\r\n [\r\n Grid2DIrregular(grid=[(centre_y_0, centre_x_0)]),\r\n Grid2DIrregular(grid=[(centre_y_0, centre_x_0), (centre_y_1, centre_x_1)])\r\n ]\r\n\r\n If a Profile does not have a certain entry, it is replaced with a None. 
Nones can be removed by\r\n setting `filter_nones=True`.\r\n\r\n This is used for visualization, for example plotting the centres of all mass profiles colored by their profile.\r\n \"\"\"\r\n if filter_nones:\r\n\r\n return [\r\n galaxy.extract_attribute(cls=cls, attr_name=attr_name)\r\n for galaxy in self.galaxies\r\n if galaxy.extract_attribute(cls=cls, attr_name=attr_name) is not None\r\n ]\r\n\r\n else:\r\n\r\n return [\r\n galaxy.extract_attribute(cls=cls, attr_name=attr_name)\r\n for galaxy in self.galaxies\r\n ]\r\n\r\n\r\nclass AbstractPlaneLensing(AbstractPlane):\r\n def __init__(self, redshift, galaxies):\r\n super().__init__(redshift=redshift, galaxies=galaxies)\r\n\r\n @grid_decorators.grid_2d_to_structure\r\n def image_2d_from_grid(self, grid):\r\n \"\"\"\r\n Returns the profile-image plane image of the list of galaxies of the plane's sub-grid, by summing the\r\n individual images of each galaxy's light profile.\r\n\r\n The image is calculated on the sub-grid and binned-up to the original grid by taking the mean\r\n value of every set of sub-pixels, provided the *returned_binned_sub_grid* bool is `True`.\r\n\r\n If the plane has no galaxies (or no galaxies have mass profiles) an arrays of all zeros the shape of the plane's\r\n sub-grid is returned.\r\n\r\n Parameters\r\n -----------\r\n\r\n \"\"\"\r\n if self.galaxies:\r\n return sum(\r\n map(lambda galaxy: galaxy.image_2d_from_grid(grid=grid), self.galaxies)\r\n )\r\n return np.zeros((grid.shape[0],))\r\n\r\n def images_of_galaxies_from_grid(self, grid):\r\n return list(\r\n map(lambda galaxy: galaxy.image_2d_from_grid(grid=grid), self.galaxies)\r\n )\r\n\r\n def padded_image_2d_from_grid_and_psf_shape(self, grid, psf_shape_2d):\r\n\r\n padded_grid = grid.padded_grid_from_kernel_shape(\r\n kernel_shape_native=psf_shape_2d\r\n )\r\n\r\n return self.image_2d_from_grid(grid=padded_grid)\r\n\r\n @grid_decorators.grid_2d_to_structure\r\n def convergence_2d_from_grid(self, grid):\r\n \"\"\"\r\n Returns the convergence of the list of galaxies of the plane's sub-grid, by summing the individual convergences \\\r\n of each galaxy's mass profile.\r\n\r\n The convergence is calculated on the sub-grid and binned-up to the original grid by taking the mean\r\n value of every set of sub-pixels, provided the *returned_binned_sub_grid* bool is `True`.\r\n\r\n If the plane has no galaxies (or no galaxies have mass profiles) an arrays of all zeros the shape of the plane's\r\n sub-grid is returned.\r\n\r\n Parameters\r\n -----------\r\n grid : Grid2D\r\n The grid (or sub) of (y,x) arc-second coordinates at the centre of every unmasked pixel which the \\\r\n potential is calculated on.\r\n galaxies : [g.Galaxy]\r\n The galaxies whose mass profiles are used to compute the surface densities.\r\n \"\"\"\r\n if self.galaxies:\r\n return sum(\r\n map(lambda g: g.convergence_2d_from_grid(grid=grid), self.galaxies)\r\n )\r\n return np.zeros(shape=(grid.shape[0],))\r\n\r\n @grid_decorators.grid_2d_to_structure\r\n def potential_2d_from_grid(self, grid):\r\n \"\"\"\r\n Returns the potential of the list of galaxies of the plane's sub-grid, by summing the individual potentials \\\r\n of each galaxy's mass profile.\r\n\r\n The potential is calculated on the sub-grid and binned-up to the original grid by taking the mean\r\n value of every set of sub-pixels, provided the *returned_binned_sub_grid* bool is `True`.\r\n\r\n If the plane has no galaxies (or no galaxies have mass profiles) an arrays of all zeros the shape of the plane's\r\n sub-grid is 
returned.\r\n\r\n Parameters\r\n -----------\r\n grid : Grid2D\r\n The grid (or sub) of (y,x) arc-second coordinates at the centre of every unmasked pixel which the \\\r\n potential is calculated on.\r\n galaxies : [g.Galaxy]\r\n The galaxies whose mass profiles are used to compute the surface densities.\r\n \"\"\"\r\n if self.galaxies:\r\n return sum(\r\n map(lambda g: g.potential_2d_from_grid(grid=grid), self.galaxies)\r\n )\r\n return np.zeros((grid.shape[0]))\r\n\r\n @grid_decorators.grid_2d_to_structure\r\n def deflections_2d_from_grid(self, grid):\r\n if self.galaxies:\r\n return sum(\r\n map(lambda g: g.deflections_2d_from_grid(grid=grid), self.galaxies)\r\n )\r\n return np.zeros(shape=(grid.shape[0], 2))\r\n\r\n @grid_decorators.grid_2d_to_structure\r\n def traced_grid_from_grid(self, grid):\r\n \"\"\"Trace this plane's grid_stacks to the next plane, using its deflection angles.\"\"\"\r\n return grid - self.deflections_2d_from_grid(grid=grid)\r\n\r\n\r\nclass AbstractPlaneData(AbstractPlaneLensing):\r\n def __init__(self, redshift, galaxies):\r\n\r\n super().__init__(redshift=redshift, galaxies=galaxies)\r\n\r\n def blurred_image_2d_from_grid_and_psf(self, grid, psf, blurring_grid):\r\n\r\n image = self.image_2d_from_grid(grid=grid)\r\n\r\n blurring_image = self.image_2d_from_grid(grid=blurring_grid)\r\n\r\n return psf.convolved_array_from_array_and_mask(\r\n array=image.binned.native + blurring_image.binned.native, mask=grid.mask\r\n )\r\n\r\n def blurred_images_of_galaxies_from_grid_and_psf(self, grid, psf, blurring_grid):\r\n return [\r\n galaxy.blurred_image_2d_from_grid_and_psf(\r\n grid=grid, psf=psf, blurring_grid=blurring_grid\r\n )\r\n for galaxy in self.galaxies\r\n ]\r\n\r\n def blurred_image_2d_from_grid_and_convolver(self, grid, convolver, blurring_grid):\r\n\r\n image = self.image_2d_from_grid(grid=grid)\r\n\r\n blurring_image = self.image_2d_from_grid(grid=blurring_grid)\r\n\r\n return convolver.convolve_image(image=image, blurring_image=blurring_image)\r\n\r\n def blurred_images_of_galaxies_from_grid_and_convolver(\r\n self, grid, convolver, blurring_grid\r\n ):\r\n return [\r\n galaxy.blurred_image_2d_from_grid_and_convolver(\r\n grid=grid, convolver=convolver, blurring_grid=blurring_grid\r\n )\r\n for galaxy in self.galaxies\r\n ]\r\n\r\n def unmasked_blurred_image_2d_from_grid_and_psf(self, grid, psf):\r\n\r\n padded_grid = grid.padded_grid_from_kernel_shape(\r\n kernel_shape_native=psf.shape_native\r\n )\r\n\r\n padded_image = self.image_2d_from_grid(grid=padded_grid)\r\n\r\n return padded_grid.mask.unmasked_blurred_array_from_padded_array_psf_and_image_shape(\r\n padded_array=padded_image, psf=psf, image_shape=grid.mask.shape\r\n )\r\n\r\n def unmasked_blurred_image_of_galaxies_from_grid_and_psf(self, grid, psf):\r\n\r\n padded_grid = grid.padded_grid_from_kernel_shape(\r\n kernel_shape_native=psf.shape_native\r\n )\r\n\r\n unmasked_blurred_images_of_galaxies = []\r\n\r\n for galaxy in self.galaxies:\r\n\r\n padded_image_1d = galaxy.image_2d_from_grid(grid=padded_grid)\r\n\r\n unmasked_blurred_array_2d = padded_grid.mask.unmasked_blurred_array_from_padded_array_psf_and_image_shape(\r\n padded_array=padded_image_1d, psf=psf, image_shape=grid.mask.shape\r\n )\r\n\r\n unmasked_blurred_images_of_galaxies.append(unmasked_blurred_array_2d)\r\n\r\n return unmasked_blurred_images_of_galaxies\r\n\r\n def profile_visibilities_from_grid_and_transformer(self, grid, transformer):\r\n\r\n if self.galaxies:\r\n image = self.image_2d_from_grid(grid=grid)\r\n return 
transformer.visibilities_from_image(image=image)\r\n else:\r\n return vis.Visibilities.zeros(\r\n shape_slim=(transformer.uv_wavelengths.shape[0],)\r\n )\r\n\r\n def profile_visibilities_of_galaxies_from_grid_and_transformer(\r\n self, grid, transformer\r\n ):\r\n return [\r\n galaxy.profile_visibilities_from_grid_and_transformer(\r\n grid=grid, transformer=transformer\r\n )\r\n for galaxy in self.galaxies\r\n ]\r\n\r\n def sparse_image_plane_grid_from_grid(\r\n self, grid, settings_pixelization=pix.SettingsPixelization()\r\n ):\r\n\r\n if not self.has_pixelization:\r\n return None\r\n\r\n hyper_galaxy_image = self.hyper_galaxy_image_of_galaxy_with_pixelization\r\n\r\n return self.pixelization.sparse_grid_from_grid(\r\n grid=grid, hyper_image=hyper_galaxy_image, settings=settings_pixelization\r\n )\r\n\r\n def mapper_from_grid_and_sparse_grid(\r\n self,\r\n grid,\r\n sparse_grid,\r\n sparse_image_plane_grid=None,\r\n settings_pixelization=pix.SettingsPixelization(),\r\n ):\r\n\r\n galaxies_with_pixelization = list(\r\n filter(lambda galaxy: galaxy.pixelization is not None, self.galaxies)\r\n )\r\n\r\n if len(galaxies_with_pixelization) == 0:\r\n return None\r\n if len(galaxies_with_pixelization) == 1:\r\n\r\n pixelization = galaxies_with_pixelization[0].pixelization\r\n\r\n return pixelization.mapper_from_grid_and_sparse_grid(\r\n grid=grid,\r\n sparse_grid=sparse_grid,\r\n sparse_image_plane_grid=sparse_image_plane_grid,\r\n hyper_image=galaxies_with_pixelization[0].hyper_galaxy_image,\r\n settings=settings_pixelization,\r\n )\r\n\r\n elif len(galaxies_with_pixelization) > 1:\r\n raise exc.PixelizationException(\r\n \"The number of galaxies with pixelizations in one plane is above 1\"\r\n )\r\n\r\n def inversion_imaging_from_grid_and_data(\r\n self,\r\n grid,\r\n image,\r\n noise_map,\r\n convolver,\r\n settings_pixelization=pix.SettingsPixelization(),\r\n settings_inversion=inv.SettingsInversion(),\r\n ):\r\n\r\n sparse_grid = self.sparse_image_plane_grid_from_grid(grid=grid)\r\n\r\n mapper = self.mapper_from_grid_and_sparse_grid(\r\n grid=grid,\r\n sparse_grid=sparse_grid,\r\n settings_pixelization=settings_pixelization,\r\n )\r\n\r\n return inv.InversionImagingMatrix.from_data_mapper_and_regularization(\r\n image=image,\r\n noise_map=noise_map,\r\n convolver=convolver,\r\n mapper=mapper,\r\n regularization=self.regularization,\r\n settings=settings_inversion,\r\n )\r\n\r\n def inversion_interferometer_from_grid_and_data(\r\n self,\r\n grid,\r\n visibilities,\r\n noise_map,\r\n transformer,\r\n settings_pixelization=pix.SettingsPixelization(),\r\n settings_inversion=inv.SettingsInversion(),\r\n ):\r\n\r\n sparse_grid = self.sparse_image_plane_grid_from_grid(grid=grid)\r\n\r\n mapper = self.mapper_from_grid_and_sparse_grid(\r\n grid=grid,\r\n sparse_grid=sparse_grid,\r\n settings_pixelization=settings_pixelization,\r\n )\r\n\r\n return inv.AbstractInversionInterferometer.from_data_mapper_and_regularization(\r\n visibilities=visibilities,\r\n noise_map=noise_map,\r\n transformer=transformer,\r\n mapper=mapper,\r\n regularization=self.regularization,\r\n settings=settings_inversion,\r\n )\r\n\r\n def plane_image_2d_from_grid(self, grid):\r\n return plane_util.plane_image_of_galaxies_from(\r\n shape=grid.mask.shape,\r\n grid=grid.mask.unmasked_grid_sub_1,\r\n galaxies=self.galaxies,\r\n )\r\n\r\n def hyper_noise_map_from_noise_map(self, noise_map):\r\n hyper_noise_maps = self.hyper_noise_maps_of_galaxies_from_noise_map(\r\n noise_map=noise_map\r\n )\r\n return sum(hyper_noise_maps)\r\n\r\n 
def hyper_noise_maps_of_galaxies_from_noise_map(self, noise_map):\r\n \"\"\"For a contribution map and noise-map, use the model hyper_galaxy galaxies to compute a hyper noise-map.\r\n\r\n Parameters\r\n -----------\r\n noise_map : imaging.NoiseMap or ndarray\r\n An arrays describing the RMS standard deviation error in each pixel, preferably in units of electrons per\r\n second.\r\n \"\"\"\r\n hyper_noise_maps = []\r\n\r\n for galaxy in self.galaxies:\r\n\r\n if galaxy.has_hyper_galaxy:\r\n\r\n hyper_noise_map_1d = galaxy.hyper_galaxy.hyper_noise_map_from_hyper_images_and_noise_map(\r\n noise_map=noise_map,\r\n hyper_model_image=galaxy.hyper_model_image,\r\n hyper_galaxy_image=galaxy.hyper_galaxy_image,\r\n )\r\n\r\n hyper_noise_maps.append(hyper_noise_map_1d)\r\n\r\n else:\r\n\r\n hyper_noise_map = array_2d.Array2D.manual_mask(\r\n array=np.zeros(noise_map.mask.mask_sub_1.pixels_in_mask),\r\n mask=noise_map.mask.mask_sub_1,\r\n )\r\n\r\n hyper_noise_maps.append(hyper_noise_map)\r\n\r\n return hyper_noise_maps\r\n\r\n @property\r\n def contribution_map(self):\r\n\r\n contribution_maps = self.contribution_maps_of_galaxies\r\n\r\n contribution_maps = [i for i in contribution_maps if i is not None]\r\n\r\n if contribution_maps:\r\n return sum(contribution_maps)\r\n else:\r\n return None\r\n\r\n @property\r\n def contribution_maps_of_galaxies(self):\r\n\r\n contribution_maps = []\r\n\r\n for galaxy in self.galaxies:\r\n\r\n if galaxy.hyper_galaxy is not None:\r\n\r\n contribution_maps.append(galaxy.contribution_map)\r\n\r\n else:\r\n\r\n contribution_maps.append(None)\r\n\r\n return contribution_maps\r\n\r\n def galaxy_image_dict_from_grid(self, grid) -> {g.Galaxy: np.ndarray}:\r\n \"\"\"\r\n A dictionary associating galaxies with their corresponding model images\r\n \"\"\"\r\n\r\n galaxy_image_dict = dict()\r\n\r\n images_of_galaxies = self.images_of_galaxies_from_grid(grid=grid)\r\n for (galaxy_index, galaxy) in enumerate(self.galaxies):\r\n galaxy_image_dict[galaxy] = images_of_galaxies[galaxy_index]\r\n\r\n return galaxy_image_dict\r\n\r\n def galaxy_blurred_image_dict_from_grid_and_convolver(\r\n self, grid, convolver, blurring_grid\r\n ) -> {g.Galaxy: np.ndarray}:\r\n \"\"\"\r\n A dictionary associating galaxies with their corresponding model images\r\n \"\"\"\r\n\r\n galaxy_blurred_image_dict = dict()\r\n\r\n blurred_images_of_galaxies = self.blurred_images_of_galaxies_from_grid_and_convolver(\r\n grid=grid, convolver=convolver, blurring_grid=blurring_grid\r\n )\r\n for (galaxy_index, galaxy) in enumerate(self.galaxies):\r\n galaxy_blurred_image_dict[galaxy] = blurred_images_of_galaxies[galaxy_index]\r\n\r\n return galaxy_blurred_image_dict\r\n\r\n def galaxy_profile_visibilities_dict_from_grid_and_transformer(\r\n self, grid, transformer\r\n ) -> {g.Galaxy: np.ndarray}:\r\n \"\"\"\r\n A dictionary associating galaxies with their corresponding model images\r\n \"\"\"\r\n\r\n galaxy_profile_visibilities_image_dict = dict()\r\n\r\n profile_visibilities_of_galaxies = self.profile_visibilities_of_galaxies_from_grid_and_transformer(\r\n grid=grid, transformer=transformer\r\n )\r\n for (galaxy_index, galaxy) in enumerate(self.galaxies):\r\n galaxy_profile_visibilities_image_dict[\r\n galaxy\r\n ] = profile_visibilities_of_galaxies[galaxy_index]\r\n\r\n return galaxy_profile_visibilities_image_dict\r\n\r\n\r\nclass Plane(AbstractPlaneData):\r\n def __init__(self, redshift=None, galaxies=None):\r\n\r\n super(Plane, self).__init__(redshift=redshift, galaxies=galaxies)\r\n\r\n\r\nclass 
PlaneImage:\r\n def __init__(self, array, grid):\r\n\r\n self.array = array\r\n self.grid = grid\r\n"
] |
[
[
"numpy.zeros",
"numpy.mean"
]
] |
ontheskyl/SSD
|
[
"223dc14b780748ef627201a52cceabfde65e34fc"
] |
[
"ssd/engine/inference.py"
] |
[
"import logging\nimport os\n\nimport torch\nimport torch.utils.data\nfrom tqdm import tqdm\n\nfrom ssd.data.build import make_data_loader\nfrom ssd.data.datasets.evaluation import evaluate\n\nfrom ssd.utils import dist_util, mkdir\nfrom ssd.utils.dist_util import synchronize, is_main_process\nimport cv2\n\ndef _accumulate_predictions_from_multiple_gpus(predictions_per_gpu):\n all_predictions = dist_util.all_gather(predictions_per_gpu)\n if not dist_util.is_main_process():\n return\n # merge the list of dicts\n predictions = {}\n for p in all_predictions:\n predictions.update(p)\n # convert a dict where the key is the index in a list\n image_ids = list(sorted(predictions.keys()))\n if len(image_ids) != image_ids[-1] + 1:\n logger = logging.getLogger(\"SSD.inference\")\n logger.warning(\n \"Number of images that were gathered from multiple processes is not \"\n \"a contiguous set. Some images might be missing from the evaluation\"\n )\n\n # convert to a list\n predictions = [predictions[i] for i in image_ids]\n return predictions\n\n\ndef compute_on_dataset(model, data_loader, device):\n results_dict = {}\n for batch in tqdm(data_loader):\n images, targets, image_ids = batch\n cpu_device = torch.device(\"cpu\")\n with torch.no_grad():\n outputs = model(images.to(device))\n\n outputs = [o.to(cpu_device) for o in outputs]\n results_dict.update(\n {int(img_id): result for img_id, result in zip(image_ids, outputs)}\n )\n return results_dict\n\n\ndef inference(model, data_loader, dataset_name, device, output_folder=None, use_cached=False, allow_write_img = False, image_size = 512, **kwargs):\n dataset = data_loader.dataset\n logger = logging.getLogger(\"SSD.inference\")\n logger.info(\"Evaluating {} dataset({} images):\".format(dataset_name, len(dataset)))\n predictions_path = os.path.join(output_folder, 'predictions.pth')\n if use_cached and os.path.exists(predictions_path):\n predictions = torch.load(predictions_path, map_location='cpu')\n else:\n predictions = compute_on_dataset(model, data_loader, device)\n synchronize()\n predictions = _accumulate_predictions_from_multiple_gpus(predictions)\n if not is_main_process():\n return\n if output_folder:\n torch.save(predictions, predictions_path)\n\n if (allow_write_img):\n if (not os.path.isdir(\"eval_results\")):\n os.mkdir(\"eval_results\")\n\n LABEL = dataset.class_names\n for i in range(len(dataset)):\n image_id, annotation = dataset.get_annotation(i)\n img = dataset._read_image(image_id)\n\n img_info = dataset.get_img_info(i)\n prediction = predictions[i]\n boxes, labels, scores = prediction['boxes'], prediction['labels'], prediction['scores']\n\n for i in range(len(boxes)):\n b1 = int(max(boxes[i][0] * img_info[\"width\"] / image_size, 0))\n b2 = int(max(boxes[i][1] * img_info[\"height\"] / image_size, 0))\n b3 = int(min(boxes[i][2] * img_info[\"width\"] / image_size, img_info[\"width\"]))\n b4 = int(min(boxes[i][3] * img_info[\"height\"] / image_size, img_info[\"height\"]))\n img = cv2.rectangle(img, (b1, b2), (b3, b4), (255, 0, 0), 2)\n img = cv2.putText(img, \"{}\".format(LABEL[labels[i]]), (b1, b2 - 30), cv2.FONT_HERSHEY_SIMPLEX, \n 0.8, (0, 0, 255), 2, cv2.LINE_AA)\n img = cv2.putText(img, \"{}\".format(round(float(scores[i]), 2)), (b1, b2 - 5), cv2.FONT_HERSHEY_SIMPLEX, \n 0.8, (0, 0, 255), 2, cv2.LINE_AA)\n\n cv2.imwrite(os.path.join(\"eval_results\", \"{}.jpg\".format(image_id)), img)\n return evaluate(dataset=dataset, predictions=predictions, output_dir=output_folder, **kwargs)\n\n\[email protected]_grad()\ndef do_evaluation(cfg, 
model, distributed, check_write_img = False, check_9_labels = False, **kwargs):\n if isinstance(model, torch.nn.parallel.DistributedDataParallel):\n model = model.module\n model.eval()\n device = torch.device(cfg.MODEL.DEVICE)\n data_loaders_val = make_data_loader(cfg, is_train=False, distributed=distributed, check_9_labels=check_9_labels)\n eval_results = []\n for dataset_name, data_loader in zip(cfg.DATASETS.TEST, data_loaders_val):\n output_folder = os.path.join(cfg.OUTPUT_DIR, \"inference\", dataset_name)\n if not os.path.exists(output_folder):\n mkdir(output_folder)\n eval_result = inference(model, data_loader, dataset_name, device, output_folder, allow_write_img=check_write_img, image_size = cfg.INPUT.IMAGE_SIZE, **kwargs)\n eval_results.append(eval_result)\n return eval_results\n"
] |
[
[
"torch.device",
"torch.save",
"torch.no_grad",
"torch.load"
]
] |
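A small self-contained sketch of the box-rescaling arithmetic used in the `inference` loop of the `ssd/engine/inference.py` record above (mapping predictions from the network input resolution back to the original image); the function name and sample values are illustrative, not part of the record.

```python
def rescale_box(box, img_w, img_h, image_size=512):
    # Predictions are in network-input coordinates; scale to image size and clamp to the frame.
    x1 = int(max(box[0] * img_w / image_size, 0))
    y1 = int(max(box[1] * img_h / image_size, 0))
    x2 = int(min(box[2] * img_w / image_size, img_w))
    y2 = int(min(box[3] * img_h / image_size, img_h))
    return x1, y1, x2, y2

print(rescale_box([100, 120, 300, 360], img_w=1280, img_h=720))  # (250, 168, 750, 506)
```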
MarioBonse/flightmare
|
[
"4b8f53515914afe268bf89eb93a15fc33547e6b2"
] |
[
"flightrl/rpg_baselines/common/distributions.py"
] |
[
"import numpy as np\nimport tensorflow as tf\nfrom tensorflow.python.ops import math_ops\nfrom gym import spaces\n\nfrom stable_baselines.common.tf_layers import linear\n\nEPS = 1e-6 # Avoid NaN (prevents division by zero or log of zero)\n# CAP the standard deviation of the actor\nLOG_STD_MAX = 2\nLOG_STD_MIN = -20\n\nclass ProbabilityDistribution(object):\n \"\"\"\n Base class for describing a probability distribution.\n \"\"\"\n def __init__(self):\n super(ProbabilityDistribution, self).__init__()\n\n def flatparam(self):\n \"\"\"\n Return the direct probabilities\n\n :return: ([float]) the probabilities\n \"\"\"\n raise NotImplementedError\n\n def mode(self):\n \"\"\"\n Returns the probability\n\n :return: (Tensorflow Tensor) the deterministic action\n \"\"\"\n raise NotImplementedError\n\n def neglogp(self, x):\n \"\"\"\n returns the of the negative log likelihood\n\n :param x: (str) the labels of each index\n :return: ([float]) The negative log likelihood of the distribution\n \"\"\"\n # Usually it's easier to define the negative logprob\n raise NotImplementedError\n\n def kl(self, other):\n \"\"\"\n Calculates the Kullback-Leibler divergence from the given probability distribution\n\n :param other: ([float]) the distribution to compare with\n :return: (float) the KL divergence of the two distributions\n \"\"\"\n raise NotImplementedError\n\n def entropy(self):\n \"\"\"\n Returns Shannon's entropy of the probability\n\n :return: (float) the entropy\n \"\"\"\n raise NotImplementedError\n\n def sample(self):\n \"\"\"\n returns a sample from the probability distribution\n\n :return: (Tensorflow Tensor) the stochastic action\n \"\"\"\n raise NotImplementedError\n\n def logp(self, x):\n \"\"\"\n returns the of the log likelihood\n\n :param x: (str) the labels of each index\n :return: ([float]) The log likelihood of the distribution\n \"\"\"\n return - self.neglogp(x)\n\n\nclass ProbabilityDistributionType(object):\n \"\"\"\n Parametrized family of probability distributions\n \"\"\"\n\n def probability_distribution_class(self):\n \"\"\"\n returns the ProbabilityDistribution class of this type\n\n :return: (Type ProbabilityDistribution) the probability distribution class associated\n \"\"\"\n raise NotImplementedError\n\n def proba_distribution_from_flat(self, flat):\n \"\"\"\n Returns the probability distribution from flat probabilities\n flat: flattened vector of parameters of probability distribution\n\n :param flat: ([float]) the flat probabilities\n :return: (ProbabilityDistribution) the instance of the ProbabilityDistribution associated\n \"\"\"\n return self.probability_distribution_class()(flat)\n\n def proba_distribution_from_latent(self, pi_latent_vector, vf_latent_vector, init_scale=1.0, init_bias=0.0):\n \"\"\"\n returns the probability distribution from latent values\n\n :param pi_latent_vector: ([float]) the latent pi values\n :param vf_latent_vector: ([float]) the latent vf values\n :param init_scale: (float) the initial scale of the distribution\n :param init_bias: (float) the initial bias of the distribution\n :return: (ProbabilityDistribution) the instance of the ProbabilityDistribution associated\n \"\"\"\n raise NotImplementedError\n\n def param_shape(self):\n \"\"\"\n returns the shape of the input parameters\n\n :return: ([int]) the shape\n \"\"\"\n raise NotImplementedError\n\n def sample_shape(self):\n \"\"\"\n returns the shape of the sampling\n\n :return: ([int]) the shape\n \"\"\"\n raise NotImplementedError\n\n def sample_dtype(self):\n \"\"\"\n returns the type 
of the sampling\n\n :return: (type) the type\n \"\"\"\n raise NotImplementedError\n\n def param_placeholder(self, prepend_shape, name=None):\n \"\"\"\n returns the TensorFlow placeholder for the input parameters\n\n :param prepend_shape: ([int]) the prepend shape\n :param name: (str) the placeholder name\n :return: (TensorFlow Tensor) the placeholder\n \"\"\"\n return tf.placeholder(dtype=tf.float32, shape=prepend_shape + self.param_shape(), name=name)\n\n def sample_placeholder(self, prepend_shape, name=None):\n \"\"\"\n returns the TensorFlow placeholder for the sampling\n\n :param prepend_shape: ([int]) the prepend shape\n :param name: (str) the placeholder name\n :return: (TensorFlow Tensor) the placeholder\n \"\"\"\n return tf.placeholder(dtype=self.sample_dtype(), shape=prepend_shape + self.sample_shape(), name=name)\n\n\nclass CategoricalProbabilityDistributionType(ProbabilityDistributionType):\n def __init__(self, n_cat):\n \"\"\"\n The probability distribution type for categorical input\n\n :param n_cat: (int) the number of categories\n \"\"\"\n self.n_cat = n_cat\n\n def probability_distribution_class(self):\n return CategoricalProbabilityDistribution\n\n def proba_distribution_from_latent(self, pi_latent_vector, vf_latent_vector, init_scale=1.0, init_bias=0.0):\n pdparam = linear(pi_latent_vector, 'pi', self.n_cat, init_scale=init_scale, init_bias=init_bias)\n q_values = linear(vf_latent_vector, 'q', self.n_cat, init_scale=init_scale, init_bias=init_bias)\n return self.proba_distribution_from_flat(pdparam), pdparam, q_values\n\n def param_shape(self):\n return [self.n_cat]\n\n def sample_shape(self):\n return []\n\n def sample_dtype(self):\n return tf.int64\n\n\nclass MultiCategoricalProbabilityDistributionType(ProbabilityDistributionType):\n def __init__(self, n_vec):\n \"\"\"\n The probability distribution type for multiple categorical input\n\n :param n_vec: ([int]) the vectors\n \"\"\"\n # Cast the variable because tf does not allow uint32\n self.n_vec = n_vec.astype(np.int32)\n # Check that the cast was valid\n assert (self.n_vec > 0).all(), \"Casting uint32 to int32 was invalid\"\n\n def probability_distribution_class(self):\n return MultiCategoricalProbabilityDistribution\n\n def proba_distribution_from_flat(self, flat):\n return MultiCategoricalProbabilityDistribution(self.n_vec, flat)\n\n def proba_distribution_from_latent(self, pi_latent_vector, vf_latent_vector, init_scale=1.0, init_bias=0.0):\n pdparam = linear(pi_latent_vector, 'pi', sum(self.n_vec), init_scale=init_scale, init_bias=init_bias)\n q_values = linear(vf_latent_vector, 'q', sum(self.n_vec), init_scale=init_scale, init_bias=init_bias)\n return self.proba_distribution_from_flat(pdparam), pdparam, q_values\n\n def param_shape(self):\n return [sum(self.n_vec)]\n\n def sample_shape(self):\n return [len(self.n_vec)]\n\n def sample_dtype(self):\n return tf.int64\n\n\nclass DiagGaussianProbabilityDistributionType(ProbabilityDistributionType):\n def __init__(self, size):\n \"\"\"\n The probability distribution type for multivariate Gaussian input\n\n :param size: (int) the number of dimensions of the multivariate gaussian\n \"\"\"\n self.size = size\n\n def probability_distribution_class(self):\n return DiagGaussianProbabilityDistribution\n\n def proba_distribution_from_flat(self, flat):\n \"\"\"\n returns the probability distribution from flat probabilities\n\n :param flat: ([float]) the flat probabilities\n :return: (ProbabilityDistribution) the instance of the ProbabilityDistribution associated\n \"\"\"\n 
return self.probability_distribution_class()(flat)\n\n def proba_distribution_from_latent(self, pi_latent_vector, vf_latent_vector, init_scale=1.0, init_bias=0.0):\n mean = linear(pi_latent_vector, 'pi', self.size, init_scale=init_scale, init_bias=init_bias)\n logstd = tf.get_variable(name='pi/logstd', shape=[1, self.size], initializer=tf.zeros_initializer())\n pdparam = tf.concat([mean, mean * 0.0 + logstd], axis=1)\n q_values = linear(vf_latent_vector, 'q', self.size, init_scale=init_scale, init_bias=init_bias)\n return self.proba_distribution_from_flat(pdparam), mean, q_values\n\n def param_shape(self):\n return [2 * self.size]\n\n def sample_shape(self):\n return [self.size]\n\n def sample_dtype(self):\n return tf.float32\n\n\nclass BernoulliProbabilityDistributionType(ProbabilityDistributionType):\n def __init__(self, size):\n \"\"\"\n The probability distribution type for Bernoulli input\n\n :param size: (int) the number of dimensions of the Bernoulli distribution\n \"\"\"\n self.size = size\n\n def probability_distribution_class(self):\n return BernoulliProbabilityDistribution\n\n def proba_distribution_from_latent(self, pi_latent_vector, vf_latent_vector, init_scale=1.0, init_bias=0.0):\n pdparam = linear(pi_latent_vector, 'pi', self.size, init_scale=init_scale, init_bias=init_bias)\n q_values = linear(vf_latent_vector, 'q', self.size, init_scale=init_scale, init_bias=init_bias)\n return self.proba_distribution_from_flat(pdparam), pdparam, q_values\n\n def param_shape(self):\n return [self.size]\n\n def sample_shape(self):\n return [self.size]\n\n def sample_dtype(self):\n return tf.int32\n\n\nclass CategoricalProbabilityDistribution(ProbabilityDistribution):\n def __init__(self, logits):\n \"\"\"\n Probability distributions from categorical input\n\n :param logits: ([float]) the categorical logits input\n \"\"\"\n self.logits = logits\n super(CategoricalProbabilityDistribution, self).__init__()\n\n def flatparam(self):\n return self.logits\n\n def mode(self):\n return tf.argmax(self.logits, axis=-1)\n\n def neglogp(self, x):\n # Note: we can't use sparse_softmax_cross_entropy_with_logits because\n # the implementation does not allow second-order derivatives...\n one_hot_actions = tf.one_hot(x, self.logits.get_shape().as_list()[-1])\n return tf.nn.softmax_cross_entropy_with_logits_v2(\n logits=self.logits,\n labels=tf.stop_gradient(one_hot_actions))\n\n def kl(self, other):\n a_0 = self.logits - tf.reduce_max(self.logits, axis=-1, keepdims=True)\n a_1 = other.logits - tf.reduce_max(other.logits, axis=-1, keepdims=True)\n exp_a_0 = tf.exp(a_0)\n exp_a_1 = tf.exp(a_1)\n z_0 = tf.reduce_sum(exp_a_0, axis=-1, keepdims=True)\n z_1 = tf.reduce_sum(exp_a_1, axis=-1, keepdims=True)\n p_0 = exp_a_0 / z_0\n return tf.reduce_sum(p_0 * (a_0 - tf.log(z_0) - a_1 + tf.log(z_1)), axis=-1)\n\n def entropy(self):\n a_0 = self.logits - tf.reduce_max(self.logits, axis=-1, keepdims=True)\n exp_a_0 = tf.exp(a_0)\n z_0 = tf.reduce_sum(exp_a_0, axis=-1, keepdims=True)\n p_0 = exp_a_0 / z_0\n return tf.reduce_sum(p_0 * (tf.log(z_0) - a_0), axis=-1)\n\n def sample(self):\n # Gumbel-max trick to sample\n # a categorical distribution (see http://amid.fish/humble-gumbel)\n uniform = tf.random_uniform(tf.shape(self.logits), dtype=self.logits.dtype)\n return tf.argmax(self.logits - tf.log(-tf.log(uniform)), axis=-1)\n\n @classmethod\n def fromflat(cls, flat):\n \"\"\"\n Create an instance of this from new logits values\n\n :param flat: ([float]) the categorical logits input\n :return: (ProbabilityDistribution) 
the instance from the given categorical input\n \"\"\"\n return cls(flat)\n\n\nclass MultiCategoricalProbabilityDistribution(ProbabilityDistribution):\n def __init__(self, nvec, flat):\n \"\"\"\n Probability distributions from multicategorical input\n\n :param nvec: ([int]) the sizes of the different categorical inputs\n :param flat: ([float]) the categorical logits input\n \"\"\"\n self.flat = flat\n self.categoricals = list(map(CategoricalProbabilityDistribution, tf.split(flat, nvec, axis=-1)))\n super(MultiCategoricalProbabilityDistribution, self).__init__()\n\n def flatparam(self):\n return self.flat\n\n def mode(self):\n return tf.stack([p.mode() for p in self.categoricals], axis=-1)\n\n def neglogp(self, x):\n return tf.add_n([p.neglogp(px) for p, px in zip(self.categoricals, tf.unstack(x, axis=-1))])\n\n def kl(self, other):\n return tf.add_n([p.kl(q) for p, q in zip(self.categoricals, other.categoricals)])\n\n def entropy(self):\n return tf.add_n([p.entropy() for p in self.categoricals])\n\n def sample(self):\n return tf.stack([p.sample() for p in self.categoricals], axis=-1)\n\n @classmethod\n def fromflat(cls, flat):\n \"\"\"\n Create an instance of this from new logits values\n\n :param flat: ([float]) the multi categorical logits input\n :return: (ProbabilityDistribution) the instance from the given multi categorical input\n \"\"\"\n raise NotImplementedError\n\n\nclass DiagGaussianProbabilityDistribution(ProbabilityDistribution):\n def __init__(self, flat):\n \"\"\"\n Probability distributions from multivariate Gaussian input\n\n :param flat: ([float]) the multivariate Gaussian input data\n \"\"\"\n self.flat = flat\n mean, logstd = tf.split(axis=len(flat.shape) - 1, num_or_size_splits=2, value=flat)\n self.mean = mean\n # self.logstd = tf.clip_by_value(logstd, LOG_STD_MIN, LOG_STD_MAX)\n self.logstd = logstd\n self.std = tf.exp(logstd)\n super(DiagGaussianProbabilityDistribution, self).__init__()\n\n def flatparam(self):\n return self.flat\n\n def mode(self):\n # Bounds are taken into account outside this class (during training only)\n return self.mean\n\n def neglogp(self, x):\n return 0.5 * tf.reduce_sum(tf.square((x - self.mean) / self.std), axis=-1) \\\n + 0.5 * np.log(2.0 * np.pi) * tf.cast(tf.shape(x)[-1], tf.float32) \\\n + tf.reduce_sum(self.logstd, axis=-1)\n\n def tanh_neglogp(self, u):\n logp = - self.neglogp(u)\n tanh_logp = logp - tf.reduce_sum(tf.log(1-tf.tanh(u)**2 + 1e-6), axis=1)\n return -tanh_logp\n \n def kl(self, other):\n assert isinstance(other, DiagGaussianProbabilityDistribution)\n return tf.reduce_sum(other.logstd - self.logstd + (tf.square(self.std) + tf.square(self.mean - other.mean)) /\n (2.0 * tf.square(other.std)) - 0.5, axis=-1)\n\n def entropy(self):\n return tf.reduce_sum(self.logstd + .5 * np.log(2.0 * np.pi * np.e), axis=-1)\n\n def sample(self):\n # Bounds are taken into acount outside this class (during training only)\n # Otherwise, it changes the distribution and breaks PPO2 for instance\n return self.mean + self.std * tf.random_normal(tf.shape(self.mean),\n dtype=self.mean.dtype)\n \n @classmethod\n def fromflat(cls, flat):\n \"\"\"\n Create an instance of this from new multivariate Gaussian input\n\n :param flat: ([float]) the multivariate Gaussian input data\n :return: (ProbabilityDistribution) the instance from the given multivariate Gaussian input data\n \"\"\"\n return cls(flat)\n\n\nclass BernoulliProbabilityDistribution(ProbabilityDistribution):\n def __init__(self, logits):\n \"\"\"\n Probability distributions from Bernoulli 
input\n\n :param logits: ([float]) the Bernoulli input data\n \"\"\"\n self.logits = logits\n self.probabilities = tf.sigmoid(logits)\n super(BernoulliProbabilityDistribution, self).__init__()\n\n def flatparam(self):\n return self.logits\n\n def mode(self):\n return tf.round(self.probabilities)\n\n def neglogp(self, x):\n return tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.logits,\n labels=tf.cast(x, tf.float32)),\n axis=-1)\n\n def kl(self, other):\n return tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=other.logits,\n labels=self.probabilities), axis=-1) - \\\n tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.logits,\n labels=self.probabilities), axis=-1)\n\n def entropy(self):\n return tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.logits,\n labels=self.probabilities), axis=-1)\n\n def sample(self):\n samples_from_uniform = tf.random_uniform(tf.shape(self.probabilities))\n return tf.cast(math_ops.less(samples_from_uniform, self.probabilities), tf.float32)\n\n @classmethod\n def fromflat(cls, flat):\n \"\"\"\n Create an instance of this from new Bernoulli input\n\n :param flat: ([float]) the Bernoulli input data\n :return: (ProbabilityDistribution) the instance from the given Bernoulli input data\n \"\"\"\n return cls(flat)\n\n\ndef make_proba_dist_type(ac_space):\n \"\"\"\n return an instance of ProbabilityDistributionType for the correct type of action space\n\n :param ac_space: (Gym Space) the input action space\n :return: (ProbabilityDistributionType) the appropriate instance of a ProbabilityDistributionType\n \"\"\"\n if isinstance(ac_space, spaces.Box):\n assert len(ac_space.shape) == 1, \"Error: the action space must be a vector\"\n return DiagGaussianProbabilityDistributionType(ac_space.shape[0])\n elif isinstance(ac_space, spaces.Discrete):\n return CategoricalProbabilityDistributionType(ac_space.n)\n elif isinstance(ac_space, spaces.MultiDiscrete):\n return MultiCategoricalProbabilityDistributionType(ac_space.nvec)\n elif isinstance(ac_space, spaces.MultiBinary):\n return BernoulliProbabilityDistributionType(ac_space.n)\n else:\n raise NotImplementedError(\"Error: probability distribution, not implemented for action space of type {}.\"\n .format(type(ac_space)) +\n \" Must be of type Gym Spaces: Box, Discrete, MultiDiscrete or MultiBinary.\")\n\n\ndef shape_el(tensor, index):\n \"\"\"\n get the shape of a TensorFlow Tensor element\n\n :param tensor: (TensorFlow Tensor) the input tensor\n :param index: (int) the element\n :return: ([int]) the shape\n \"\"\"\n maybe = tensor.get_shape()[index]\n if maybe is not None:\n return maybe\n else:\n return tf.shape(tensor)[index]\n"
] |
[
[
"tensorflow.concat",
"tensorflow.reduce_sum",
"tensorflow.cast",
"tensorflow.nn.sigmoid_cross_entropy_with_logits",
"tensorflow.tanh",
"tensorflow.python.ops.math_ops.less",
"tensorflow.stop_gradient",
"tensorflow.square",
"tensorflow.argmax",
"numpy.log",
"tensorflow.unstack",
"tensorflow.shape",
"tensorflow.zeros_initializer",
"tensorflow.exp",
"tensorflow.split",
"tensorflow.round",
"tensorflow.reduce_max",
"tensorflow.sigmoid",
"tensorflow.log"
]
] |
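A hedged NumPy sketch of the Gumbel-max trick that `CategoricalProbabilityDistribution.sample()` in the record above relies on; the logits are arbitrary example values.

```python
import numpy as np

rng = np.random.default_rng(0)
logits = np.array([1.0, 0.5, -2.0])
uniform = rng.uniform(size=logits.shape)

# argmax(logits - log(-log(u))) draws an index with probability softmax(logits).
sample = np.argmax(logits - np.log(-np.log(uniform)))
print(sample)
```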
KChen-lab/marker-selection
|
[
"3d6b015655095540ba3a0ab7e8682d4355065ad5"
] |
[
"scmer/_umap_l1.py"
] |
[
"from typing import List, Union, Optional\n\nimport torch\nfrom typing import Type\nfrom ._interfaces import _ABCSelector, _ABCTorchModel\nfrom ._base_selector import _BaseSelector\nfrom ._owlqn import OWLQN\n\nimport warnings\nimport multiprocessing\nimport numpy as np\n\nfrom sklearn.decomposition import PCA\n\nfrom ._utils import TicToc, VerbosePrint\nfrom ._umap_torch_models import _RegUmapModel, _StratifiedRegUmapModel # , _SimpleRegTsneModel\n\n\nclass UmapL1(_BaseSelector):\n def __init__(self, *, w: Union[float, str, list, np.ndarray] = 'ones',\n lasso: float = 1e-4, n_pcs: Optional[int] = None, perplexity: float = 30.,\n use_beta_in_Q: bool = True,\n max_outer_iter: int = 5, max_inner_iter: int = 20, owlqn_history_size: int = 100,\n eps: float = 1e-12, verbosity: int = 2, torch_precision: Union[int, str, torch.dtype] = 32,\n torch_cdist_compute_mode: str = \"use_mm_for_euclid_dist\",\n t_distr: bool = True, n_threads: int = 1, use_gpu: bool = False, pca_seed: int = 0, ridge: float = 0.,\n _keep_fitting_info: bool = False):\n \"\"\"\n UmapL1 model\n\n :param w: initial value of w, weight of each marker. Acceptable values are 'ones' (all 1),\n 'uniform' (random [0, 1] values), float numbers (all set to that number),\n or a list or numpy array with specific numbers.\n :param lasso: lasso strength (i.e., strength of L1 regularization in elastic net)\n :param n_pcs: Number of PCs used to generate P matrix. Skip PCA if set to `None`.\n :param perplexity: perplexity of t-SNE modeling\n :param use_beta_in_Q: whether to use the cell specific sigma^2 calculated from P in Q. (1 / beta)\n :param max_outer_iter: number of iterations of OWL-QN\n :param max_inner_iter: number of iterations inside OWL-QN\n :param owlqn_history_size: history size for OWL-QN.\n :param eps: epsilon for considering a value to be 0.\n :param verbosity: verbosity level (0 ~ 2).\n :param torch_precision: The dtype used inside torch model. By default, tf.float32 (a.k.a. tf.float) is used.\n However, if precision become an issue, tf.float64 may be worth trying. You can input 32, \"32\", 64, or \"64\".\n :param torch_cdist_compute_mode: cdist_compute_mode: compute mode for torch.cdist. By default,\n \"use_mm_for_euclid_dist\" to (daramatically) improve performance. However, if numerical stability became an\n issue, \"donot_use_mm_for_euclid_dist\" may be used instead. This option does not affect distances computed\n outside of pytorch, e.g., matrix P. Only matrix Q is affect.\n :param t_distr: By default, use t-distribution (1. / (1. + pdist2)) for Q.\n Use Normal distribution instead (exp(-pdist2)) if set to False. 
The latter one is not stable.\n :param n_threads: number of threads (currently only for calculating P and beta)\n :param use_gpu: whether to use GPU to train the model.\n :param pca_seed: random seed used by PCA (if applicable)\n :param ridge: ridge strength (i.e., strength of L2 regularization in elastic net)\n :param _keep_fitting_info: if `True`, write similarity matrix P to `self.P` and PyTorch model to `self.model`\n \"\"\"\n super(UmapL1, self).__init__(w, lasso, n_pcs, perplexity, use_beta_in_Q, max_outer_iter, max_inner_iter,\n owlqn_history_size, eps, verbosity, torch_precision, torch_cdist_compute_mode,\n t_distr, n_threads, use_gpu, pca_seed, ridge, _keep_fitting_info)\n\n\n def fit(self, X, *, X_teacher=None, batches=None, P=None, beta=None, must_keep=None):\n \"\"\"\n Select markers from one dataset to keep the cell-cell similarities in the same dataset\n\n :param X: data matrix (cells (rows) x genes/proteins (columns))\n :param X_teacher: get target similarities from this dataset\n :param batches: (optional) batch labels\n :param P: The P matrix, if calculated in advance\n :param beta: The beta associated with P, if calculated in advance\n :param must_keep: A boolean vector indicating if a feature must be kept.\n Those features will have a fixed weight 1.\n :return:\n \"\"\"\n tictoc = TicToc()\n trans = True\n if X_teacher is None: # if there is no other assay to mimic, just mimic itself\n X_teacher = X\n trans = False\n\n if batches is None:\n if must_keep is None and (isinstance(self._lasso, float) or isinstance(self._lasso, str)):\n model_class = _RegUmapModel #_SimpleRegTsneModel\n else:\n model_class = _RegUmapModel\n\n if self._n_pcs is None:\n P, beta = self._resolve_P_beta(X_teacher, P, beta, self._perplexity, tictoc, self.verbose_print.prints,\n self._n_threads)\n else:\n pcs = PCA(self._n_pcs, random_state=self._pca_seed).fit_transform(X_teacher)\n # print(pcs)\n P, beta = self._resolve_P_beta(pcs, P, beta, self._perplexity, tictoc, self.verbose_print.prints,\n self._n_threads)\n else:\n model_class = _StratifiedRegUmapModel\n if P is None:\n if trans:\n Xs = []\n for batch in np.unique(batches):\n batch_mask = (batches == batch)\n Xs.append(X[batch_mask, :])\n X = Xs\n _, P, beta = self._resolve_batches(X_teacher, None, batches, self._n_pcs, self._perplexity, tictoc,\n self.verbose_print, self._pca_seed, self._n_threads)\n else:\n X, P, beta = self._resolve_batches(X_teacher, None, batches, self._n_pcs, self._perplexity, tictoc,\n self.verbose_print, self._pca_seed, self._n_threads)\n else:\n raise NotImplementedError()\n\n if self._keep_fitting_info:\n self.P = P\n\n return self._fit_core(X, P, beta, must_keep, model_class, tictoc)\n\n def get_mask(self, target_n_features=None):\n \"\"\"\n Get the feature selection mask.\n For AnnData in scanpy, it can be used as adata[:, model.get_mask()]\n\n :param target_n_features: If None, all features with w > 0 are selected. If not None, only select\n `target_n_features` largest features\n :return: mask\n \"\"\"\n if target_n_features is None:\n return self.w > 0.\n else:\n n_nonzero = (self.w > 0.).sum()\n if target_n_features > n_nonzero:\n raise ValueError(f\"Only {n_nonzero} features have nonzero weights. 
\"\n f\"target_n_features may not exceed the number.\")\n return self.w >= self.w[np.argpartition(self.w, -target_n_features)[-target_n_features]]\n\n def transform(self, X, target_n_features=None, **kwargs):\n \"\"\"\n Shrink a matrix / AnnData object with full markers to the selected markers only.\n If such operation is not supported by your data object,\n you can do it manually using :func:`~UmapL1.get_mask`.\n\n :param X: Matrix / AnnData to be shrunk\n :param target_n_features: If None, all features with w > 0 are selected. If not None, only select\n `target_n_features` largest features\n :return: Shrunk matrix / Anndata\n \"\"\"\n return X[:, self.get_mask(target_n_features)]\n\n def fit_transform(self, X, **kwargs):\n \"\"\"\n Fit on a matrix / AnnData and then transfer it.\n\n :param X: The matrix / AnnData to be transformed\n :param kwargs: Other parameters for :func:`UmapL1.fit`.\n :return: Shrunk matrix / Anndata\n \"\"\"\n return self.fit(X, **kwargs).transform(X)\n\n @classmethod\n def tune(cls, target_n_features, X=None, *, X_teacher=None, batches=None,\n P=None, beta=None, must_keep=None, perplexity=30., n_pcs=None, w='ones',\n min_lasso=1e-8, max_lasso=1e-2, tolerance=0, smallest_log10_fold_change=0.1, max_iter=100,\n return_P_beta=False, n_threads=6,\n **kwargs):\n \"\"\"\n Automatically find proper lasso strength that returns the preferred number of markers\n\n :param target_n_features: number of features\n :param return_P_beta: controls what to return\n :param kwargs: all other parameters are the same for a UmapL1 model or :func:`UmapL1.fit`.\n :return: if return_P_beta is True and there are batches, (model, X, P, beta);\n if return_P_beta is True and there is no batches, (model, P, beta);\n otherwise, only model by default.\n \"\"\"\n if \"lasso\" in kwargs:\n raise ValueError(\"Parameter lasso should be substituted by max_lasso and min_lasso to set a range.\")\n if \"verbosity\" in kwargs:\n verbosity = kwargs['verbosity']\n else:\n verbosity = 3\n verbose_print = VerbosePrint(verbosity)\n tictoc = TicToc()\n\n n_features = X.shape[1]\n\n # initialize w\n if isinstance(w, float) or isinstance(w, int):\n w = np.zeros([1, n_features]) + w\n elif isinstance(w, str) and w == 'uniform':\n w = np.random.uniform(size=[1, n_features])\n elif isinstance(w, str) and w == 'ones':\n w = np.ones([1, n_features])\n else:\n w = np.array(w).reshape([1, n_features])\n\n max_log_lasso = np.log10(max_lasso)\n min_log_lasso = np.log10(min_lasso)\n\n if X_teacher is None: # if there is no other assay to mimic, just mimic itself\n X_teacher = X\n\n if batches is None:\n model_class = _RegUmapModel\n if n_pcs is None:\n P, beta = cls._resolve_P_beta(X_teacher, P, beta, perplexity, tictoc, verbose_print.prints, n_threads)\n else:\n pcs = PCA(n_pcs).fit_transform(X_teacher)\n P, beta = cls._resolve_P_beta(pcs, P, beta, perplexity, tictoc, verbose_print.prints, n_threads)\n else:\n model_class = _StratifiedRegUmapModel\n if P is None:\n X, P, beta = cls._resolve_batches(X_teacher, None, batches, n_pcs, perplexity, tictoc, verbose_print,\n n_threads)\n\n sup = n_features\n inf = 0\n\n model = None\n for it in range(max_iter):\n log_lasso = max_log_lasso / 2 + min_log_lasso / 2\n verbose_print(0, \"Iteration\", it, \"with lasso =\", 10 ** log_lasso,\n \"in [\", 10 ** min_log_lasso, \",\", 10 ** max_log_lasso, \"]...\", end=\" \")\n model = cls(w=w, lasso=10 ** log_lasso, n_pcs=n_pcs, perplexity=perplexity, **kwargs)\n n = model._fit_core(X, P, beta, must_keep, model_class, 
tictoc).get_mask().sum()\n verbose_print(0, \"Done. Number of features:\", n, \".\", tictoc.toc())\n if np.abs(n - target_n_features) <= tolerance: # Good number of features, return\n break\n\n if it > 0 and np.abs(log_lasso - prev_log_lasso) < smallest_log10_fold_change:\n warnings.warn(\"smallest_log10_fold_change reached before achieving target number of features.\")\n break\n\n prev_log_lasso = log_lasso\n\n if n > target_n_features: # Too many features, need more l1 regularization\n if n <= sup:\n sup = n\n else:\n warnings.warn(\"Monotonicity is violated. Value larger than current supremum. \"\n \"Binary search may fail. \"\n \"Consider use more max_outer_iter (default: 5) and max_inner_iter (default: 20).\")\n min_log_lasso = log_lasso\n elif n < target_n_features: # Too few features, need less l1 regularization\n if n >= inf:\n inf = n\n else:\n warnings.warn(\"Monotonicity is violated. Value lower than current infimum. \"\n \"Binary search may fail. \"\n \"Consider use more max_outer_iter (default: 5) and max_inner_iter (default: 20).\")\n max_log_lasso = log_lasso\n else: # max_iter reached\n warnings.warn(\"max_iter before reached achieving target number of features.\")\n\n if return_P_beta:\n if batches is None:\n return model, P, beta\n else:\n return model, X, P, beta\n else:\n return model\n\n @staticmethod\n def _resolve_P_beta(X, P, beta, perplexity, tictoc, print_callbacks, n_threads):\n if P is None and beta is None:\n print_callbacks[0](\"Calculating distance matrix and scaling factors...\")\n P, beta = UmapL1._x2p(X, perplexity=perplexity, print_callback=print_callbacks[1], n_threads=n_threads)\n print_callbacks[0](\"Done.\", tictoc.toc())\n elif P is None and beta is not None:\n print_callbacks[0](\"Calculating distance matrix...\")\n P = UmapL1._x2p_given_beta(X, beta)\n print_callbacks[0](\"Done.\", tictoc.toc())\n\n return P, beta\n\n @staticmethod\n def _resolve_batches(X, beta, batches, n_pcs, perplexity, tictoc, verbose_print, pca_seed, n_threads):\n batches = np.array(batches)\n batch_names = np.unique(batches)\n Xs = []\n Ps = []\n betas = []\n for batch in batch_names:\n batch_mask = (batches == batch)\n verbose_print(0, \"Batch\", batch, \"with\", sum(batch_mask), \"instances.\")\n\n Xs.append(X[batch_mask, :])\n if n_pcs is None:\n if beta is not None:\n new_beta = beta[batches == batch]\n else:\n new_beta = None\n P, new_beta = UmapL1._resolve_P_beta(Xs[-1], None, new_beta, perplexity, tictoc, verbose_print.prints, n_threads)\n else:\n pcs = PCA(n_pcs, random_state=pca_seed).fit_transform(Xs[-1])\n P, new_beta = UmapL1._resolve_P_beta(pcs, None, None, perplexity, tictoc, verbose_print.prints, n_threads)\n Ps.append(P)\n betas.append(new_beta)\n return Xs, Ps, betas\n\n def _fit_core(self, X, P, beta, must_keep, model_class: Type[_ABCTorchModel], tictoc):\n\n if self._use_beta_in_Q:\n self.verbose_print(0, \"Creating model without batches...\")\n model = model_class(P, X, self.w, beta, self._torch_precision, self._torch_cdist_compute_mode,\n self._t_distr, must_keep, ridge=self._ridge)\n else:\n self.verbose_print(0, \"Creating batch-stratified model...\")\n model = model_class(P, X, self.w, None, self._torch_precision, self._torch_cdist_compute_mode,\n self._t_distr, must_keep, ridge=self._ridge)\n\n if self._use_gpu:\n model.use_gpu()\n\n if self._lasso > 0.:\n self.verbose_print(0, \"Optimizing using OWLQN (because lasso is nonzero)...\")\n optimizer = OWLQN(model.parameters(), lasso=self._lasso, line_search_fn=\"strong_wolfe\",\n 
max_iter=self._max_inner_iter, history_size=self._owlqn_history_size, lr=1.)\n else:\n self.verbose_print(0, \"Optimizing using LBFGS (because lasso is zero)...\")\n optimizer = torch.optim.LBFGS(model.parameters(), line_search_fn=\"strong_wolfe\",\n max_iter=self._max_inner_iter, history_size=self._owlqn_history_size, lr=1.)\n\n if self._keep_fitting_info:\n self.model = model\n\n for t in range(self._max_outer_iter):\n def closure():\n if torch.is_grad_enabled():\n optimizer.zero_grad()\n loss = model.forward()\n\n if loss.requires_grad:\n loss.backward()\n\n return loss\n\n loss = optimizer.step(closure)\n self.verbose_print(1, t, 'loss (before this step):', loss.item(),\n \"Nonzero (after):\", (np.abs(model.get_w0()) > self._eps).sum(),\n tictoc.toc())\n\n self.w = model.get_w() # In case the user wants to interrupt the training\n\n loss = model.forward()\n self.verbose_print(1, 'Final', 'loss:', loss.item(), \"Nonzero:\", (np.abs(model.get_w0()) > self._eps).sum(),\n tictoc.toc())\n\n return self\n\n @staticmethod\n def _Hbeta(D=np.array([]), beta=1.0):\n \"\"\"\n Compute the perplexity and the P-row for a specific value of the\n precision of a Gaussian distribution.\n \"\"\"\n # Compute P-row and corresponding perplexity\n P = np.exp(-(D - np.min(D)) * beta)\n H = sum(P)\n return H, P\n\n @staticmethod\n def _x2p(X=np.array([]), tol=1e-5, perplexity=30.0, print_callback=print, *, n_threads):\n \"\"\"\n Performs a binary search to get P-values in such a way that each\n conditional Gaussian has the same perplexity.\n \"\"\"\n if n_threads > 1:\n return UmapL1._x2p_parallel(X, tol, perplexity, print_callback, n_threads)\n\n # Initialize some variables\n print_callback(\"Computing pairwise distances...\")\n (n, d) = X.shape\n sum_X = np.sum(np.square(X), 1)\n D = np.add(np.add(-2 * np.dot(X, X.T), sum_X).T, sum_X)\n D = np.sqrt(np.maximum(D, 0))\n P = np.zeros((n, n))\n beta = np.ones((n, 1))\n logU = np.log(perplexity)\n\n # Loop over all datapoints\n for i in range(n):\n\n # Print progress\n if i % 500 == 0:\n print_callback(\"Computing P-values for point %d of %d...\" % (i, n))\n\n # Compute the Gaussian kernel and entropy for the current precision\n betamin = -np.inf\n betamax = np.inf\n Di = D[i, np.concatenate((np.r_[0:i], np.r_[i + 1:n]))]\n (H, thisP) = UmapL1._Hbeta(Di, beta[i])\n\n # Evaluate whether the perplexity is within tolerance\n Hdiff = H - logU\n tries = 0\n\n while (not np.abs(Hdiff) < tol) and tries < 100:\n # If not, increase or decrease precision\n if Hdiff > 0:\n betamin = beta[i].copy()\n if betamax == np.inf or betamax == -np.inf:\n beta[i] = beta[i] + 1.\n else:\n beta[i] = (beta[i] + betamax) / 2.\n else:\n betamax = beta[i].copy()\n if betamin == np.inf or betamin == -np.inf:\n beta[i] = beta[i] / 2.\n else:\n beta[i] = (beta[i] + betamin) / 2.\n\n # Recompute the values\n (H, thisP) = UmapL1._Hbeta(Di, beta[i])\n Hdiff = H - logU\n tries += 1\n\n # Set the final row of P\n P[i, np.concatenate((np.r_[0:i], np.r_[i + 1:n]))] = thisP\n\n # Return final P-matrix\n print_callback(\"Mean value of sigma: %f\" % np.mean(np.sqrt(1 / beta)))\n return P, beta\n\n @staticmethod\n def _x2p_given_beta(X, beta):\n (n, d) = X.shape\n sum_X = np.sum(np.square(X), 1)\n D = np.add(np.add(-2 * np.dot(X, X.T), sum_X).T, sum_X)\n P = np.zeros((n, n))\n for i in range(n):\n (H, P[i, np.concatenate((np.r_[0:i], np.r_[i + 1:n]))]) = UmapL1._Hbeta(\n D[i, np.concatenate((np.r_[0:i], np.r_[i + 1:n]))], beta[i])\n return P\n\n @staticmethod\n def _x2p_process(Di, logU, tol):\n beta = 
1.\n betamin = -np.inf\n betamax = np.inf\n (H, thisP) = UmapL1._Hbeta(Di, beta)\n\n Hdiff = H - logU\n tries = 0\n\n while (not np.abs(Hdiff) < tol) and tries < 100:\n # If not, increase or decrease precision\n if Hdiff > 0:\n betamin = beta\n if betamax == np.inf or betamax == -np.inf:\n beta = beta * 2.\n else:\n beta = (beta + betamax) / 2.\n else:\n betamax = beta\n if betamin == np.inf or betamin == -np.inf:\n beta = beta / 2.\n else:\n beta = (beta + betamin) / 2.\n\n # Recompute the values\n (H, thisP) = UmapL1._Hbeta(Di, beta)\n Hdiff = H - logU\n tries += 1\n return thisP, beta\n # Set the final row of P\n\n @staticmethod\n def _x2p_parallel(X=np.array([]), tol=1e-5, perplexity=30.0, print_callback=print, n_threads=6):\n \"\"\"\n Performs a binary search to get P-values in such a way that each\n conditional Gaussian has the same perplexity.\n \"\"\"\n\n # Initialize some variables\n print_callback(\"Computing pairwise distances...\")\n (n, d) = X.shape\n sum_X = np.sum(np.square(X), 1)\n D = np.add(np.add(-2 * np.dot(X, X.T), sum_X).T, sum_X)\n D = np.sqrt(np.maximum(D, 0))\n logU = np.log2(perplexity)\n\n # Loop over all datapoints\n # for i in range(n):\n # Compute the Gaussian kernel and entropy for the current precision\n\n print_callback(\"Using\", n_threads, \"threads...\")\n parameters = [(D[i, np.concatenate((np.r_[0:i], np.r_[i + 1:n]))], logU, tol) for i in range(n)]\n with multiprocessing.Pool(n_threads) as pool:\n results = pool.starmap(UmapL1._x2p_process, parameters)\n\n beta = np.ones((n, 1))\n P = np.zeros((n, n))\n for i in range(n):\n P[i, np.concatenate((np.r_[0:i], np.r_[i + 1:n]))] = results[i][0]\n beta[i] = results[i][1]\n\n # Return final P-matrix\n print_callback(\"Mean value of sigma: %f\" % np.mean(np.sqrt(1 / beta)))\n return P, beta\n"
] |
[
[
"numpy.square",
"numpy.dot",
"numpy.log",
"numpy.log2",
"numpy.maximum",
"numpy.abs",
"numpy.sqrt",
"numpy.unique",
"numpy.min",
"numpy.ones",
"numpy.concatenate",
"numpy.log10",
"numpy.argpartition",
"numpy.random.uniform",
"torch.is_grad_enabled",
"numpy.array",
"numpy.zeros",
"sklearn.decomposition.PCA"
]
] |
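The UmapL1 entry above hinges on the per-point precision calibration in its `_Hbeta` / `_x2p_process` routines: for every cell, a bracketed binary search adjusts the Gaussian precision `beta` until the unnormalized kernel mass over that cell's distance row matches `log(perplexity)`. The condensed sketch below re-expresses just that routine outside the class; the function names `hbeta` / `calibrate_beta` and the toy distance row are illustrative and not part of the original file.

```python
import numpy as np

def hbeta(D, beta):
    # Unnormalized Gaussian kernel over one distance row (cf. UmapL1._Hbeta).
    P = np.exp(-(D - np.min(D)) * beta)
    return P.sum(), P

def calibrate_beta(Di, target, tol=1e-5, max_tries=100):
    # Bracketed binary search on the precision beta (cf. UmapL1._x2p_process).
    beta, betamin, betamax = 1.0, -np.inf, np.inf
    H, P = hbeta(Di, beta)
    tries = 0
    while abs(H - target) >= tol and tries < max_tries:
        if H > target:       # too much kernel mass -> sharpen the kernel
            betamin = beta
            beta = beta * 2.0 if betamax == np.inf else (beta + betamax) / 2.0
        else:                # too little kernel mass -> flatten the kernel
            betamax = beta
            beta = beta / 2.0 if betamin == -np.inf else (beta + betamin) / 2.0
        H, P = hbeta(Di, beta)
        tries += 1
    return P, beta

# Toy distance row from one cell to five neighbours (illustrative values only).
Di = np.array([0.3, 0.8, 1.2, 2.5, 4.0])
P, beta = calibrate_beta(Di, target=np.log(3.0))
print("beta:", beta, "normalized row:", P / P.sum())
```

The kernel mass shrinks monotonically as `beta` grows, which is what makes the bracketed search in the original `_x2p` and `_x2p_parallel` converge to the requested perplexity.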
d-gol/katib
|
[
"2c8758b26ffd543e08b70464f8ac7b286f3ca2ea"
] |
[
"pkg/suggestion/v1beta1/bayesianoptimization/model/gp.py"
] |
[
"# Copyright 2022 The Kubeflow Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\" module for gaussian process prior \"\"\"\nfrom sklearn.gaussian_process.kernels import RBF, Matern\nfrom sklearn.gaussian_process import GaussianProcessRegressor\n\n\nclass GaussianProcessModel:\n \"\"\" use the gaussian process as a prior \"\"\"\n def __init__(self, length_scale=0.5, noise=0.00005,\n nu=1.5, kernel_type=\"matern\"):\n \"\"\"\n :param length_scale: the larger the length_scale is, the smoother the gaussian prior is. If a float,\n an isotropic kernel is used. If an array, an anisotropic kernel is used where each dimension of it defines\n the length-scale of the respective feature dimension.\n :param noise:\n :param nu: control the smoothness of the prior using Matern kernel. The larger nu is, the smoother the\n approximate function is.\n :param kernel_type: \"rbf\": squared exponential kernel, \"matern\": Matern kernel.\n \"\"\"\n if kernel_type == \"rbf\":\n kernel = RBF(length_scale=length_scale)\n elif kernel_type == \"matern\":\n kernel = Matern(length_scale=length_scale, nu=nu)\n else:\n raise Exception(\"kernel_type must be 'rbf' or 'matern'\")\n self.gp = GaussianProcessRegressor(\n kernel=kernel,\n alpha=noise,\n random_state=0,\n optimizer=None,\n )\n\n def fit(self, X_train, y_train):\n self.gp.fit(X_train, y_train)\n\n def predict(self, X_test):\n y_mean, y_std = self.gp.predict(X_test, return_std=True)\n y_variance = y_std ** 2\n return y_mean, y_std, y_variance\n"
] |
[
[
"sklearn.gaussian_process.kernels.RBF",
"sklearn.gaussian_process.GaussianProcessRegressor",
"sklearn.gaussian_process.kernels.Matern"
]
] |
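The `GaussianProcessModel` wrapper in the katib entry above is a thin layer over scikit-learn. The sketch below shows the equivalent direct calls it makes, using the wrapper's own defaults (Matern kernel with `length_scale=0.5`, `nu=1.5`, observation noise `5e-5`, optimizer disabled so the prior hyperparameters stay fixed). The 1-D toy objective and test grid are made up for illustration.

```python
import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import Matern

# Hypothetical 1-D objective observed at a few points.
X_train = np.array([[0.1], [0.4], [0.7], [0.9]])
y_train = np.sin(3.0 * X_train).ravel()

gp = GaussianProcessRegressor(
    kernel=Matern(length_scale=0.5, nu=1.5),  # wrapper's default Matern prior
    alpha=5e-5,                               # noise term added to the kernel diagonal
    optimizer=None,                           # keep the prior hyperparameters fixed
    random_state=0,
)
gp.fit(X_train, y_train)

X_test = np.linspace(0.0, 1.0, 5).reshape(-1, 1)
y_mean, y_std = gp.predict(X_test, return_std=True)
y_variance = y_std ** 2  # same y_variance the wrapper's predict() returns
print(y_mean, y_variance)
```

Returning `y_std ** 2` alongside the mean matches the `(y_mean, y_std, y_variance)` tuple exposed by the wrapper's `predict` method.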
jessevp07/lcoc-ldevs
|
[
"c2dac1b17618fe50c3298aa3d915975a5740812a"
] |
[
"lcoc/urdb.py"
] |
[
"\"\"\"\nDatabaseRates object created from rates downloaded from the URDB, \nhttps://openei.org.\n\"\"\"\n#public\nimport sys\nimport glob\nimport logging\nimport warnings\nimport numpy as np\nimport pandas as pd\nfrom datetime import datetime\n\nsys.path.append('..\\\\')\nimport config as config\nimport lcoc.readwrite as readwrite\nimport lcoc.helpers as helpers\n\n#settings\npd.options.mode.chained_assignment = None \n\nclass DatabaseRates(object):\n \"\"\"\n Object for working with data downloaded from NREL's Utility Rate \n Database (URDB). Rates in the URDB are checked at updated annually by NREL \n under funding from the U.S. Department of Energy's Solar Energy \n Technologies Program, in partnership with Illinois State University's \n Intstitute for Regulatory Policy Studies.\n\n Attributes\n -----------\n source:\n URL used to download URDB data\n rate_data: \n pandas.DataFrame where each row represents a unique utility rate,\n unfiltered & unprocessed from the URDB.\n res_rate_data:\n pandas.DataFrame where each row represents a unique residential utility\n rate\n com_rate_data:\n pandas.DataFrame where each row represents a unique commerical utility\n rate\n prev_exists:\n Boolean indicating whether version of dataset has been previously ran\n \"\"\"\n\n def __init__(self, urdb_file=None):\n # Download URDB data\n self.source='https://openei.org/apps/USURDB/download/usurdb.csv.gz'\n \n # Load URDB data\n if urdb_file is not None:\n self.rate_data = pd.read_csv(urdb_file, low_memory=False)\n else:\n self.rate_data = readwrite.read_urdb_data(self.source)\n \n # Assign rate_id\n self.rate_data['rate_id'] = list(range(1, len(self.rate_data)+1))\n \n # Save copy of URDB data (if unique) to data/urdb\n self.prev_exists = readwrite.write_urdb_rate_data(self.rate_data)\n \n # Separate residential & commercial rates into separate dfs\n self.res_rate_data = self.rate_data[self.rate_data['sector']=='Residential']\n self.com_rate_data = self.rate_data[self.rate_data['sector'].isin(['Commercial', 'Industrial'])]\n\n def filter_stale_rates(self, industry):\n \"\"\"\n Removes rates w/ specified end date, so that only rates without \n end dates remain (active rates). The industry arg must be \"residential\" \n or \"commercial\".\n \"\"\"\n\n industry = industry.lower()\n if industry == 'residential':\n df = self.res_rate_data\n \n elif industry == 'commercial':\n df = self.com_rate_data\n \n else:\n raise ValueError(\"Industry must be 'residential' or 'commercial'!\")\n\n df = df[df.enddate.isnull()]\n\n if industry == 'residential':\n self.res_rate_data = df\n \n elif industry == 'commercial':\n self.com_rate_data = df \n\n def classify_rate_structures(self, industry, ev_rate_words_file='filters\\\\urdb_res_ev_specific_rate_words.txt'):\n \"\"\"\n Adds five columns to self.res_rate_data and four to self.com_rate_data, \n ['is_ev_rate' (residential only), is_demand_rate', 'is_tier_rate', '\n is_seasonal_rate', 'is_tou_rate'], that are binary classifiers \n indicating whether a rate is a demand rate, tier rate, seasonal rate, and/or \n TOU rate. Note that rates may be combinations of 1+ rate structure. 
\n Tier rates and flat rates are mutally exclusive, meaning when \n 'is_tier_rate'==0, it is a flat rate.\n \"\"\"\n\n industry = industry.lower()\n if industry == 'residential':\n df = self.res_rate_data\n \n with open(ev_rate_words_file, 'r') as f:\n filters = f.read().splitlines()\n \n df['is_ev_rate'] = ((df.name.apply(lambda x: helpers.contains_filter_phrases(x, filters)==True))|\n (df.description.apply(lambda x: helpers.contains_filter_phrases(x, filters)))==True).map(int)\n\n elif industry == 'commercial':\n df = self.com_rate_data\n \n else:\n raise ValueError(\"industry must be 'residential' or 'commercial'!\")\n \n # Classify by rate structure\n is_demand, is_tier, is_seasonal, is_tou = [], [], [], []\n\n tier_cols = []\n for tier in range(1,11): #period 0\n tier_cols.append('energyratestructure/period0/tier{}rate'.format(tier))\n\n for tier in range(1,8): #period 1\n tier_cols.append('energyratestructure/period1/tier{}rate'.format(tier))\n\n for per in range(2,6): #period 2-5\n for tier in range(1,5):\n tier_cols.append('energyratestructure/period{0}/tier{1}rate'.format(per, tier))\n\n for _, row in df.iterrows():\n # Demand rate check\n if (np.isnan(float(row['flatdemandstructure/period0/tier0rate'])) \n and np.isnan(float(row['demandratestructure/period0/tier0rate']))):\n is_demand.append(0)\n \n else:\n is_demand.append(1)\n\n # Tier rate check\n tier_check = int(row[tier_cols].isnull().all()==False)\n is_tier.append(tier_check)\n\n # Seasonal & TOU rate check\n try:\n year_wkdays = [ls.replace('[', '').replace(',', '').replace(' ', '') for ls in str(row['energyweekdayschedule']).split(']')][:-2]\n year_wknds = [ls.replace('[', '').replace(',', '').replace(' ', '') for ls in str(row['energyweekendschedule']).split(']')][:-2]\n\n #seasonal\n if (len(set(year_wkdays))>1) or (len(set(year_wknds))>1):\n seasonal=1\n else:\n seasonal=0\n\n is_seasonal.append(seasonal)\n\n #TOU\n tous =[]\n for wkday_month, wknd_month in zip(year_wkdays, year_wknds):\n if (len(set(wkday_month))>1) or (len(set(wknd_month))>1):\n tous.append(1)\n \n else:\n tous.append(0)\n \n if np.array(tous).sum()==0:\n tou=0\n \n else:\n tou=1\n\n is_tou.append(tou)\n \n except:\n is_seasonal.append(np.nan)\n is_tou.append(np.nan)\n\n df['is_demand_rate'] = is_demand\n df['is_tier_rate'] = is_tier\n df['is_seasonal_rate'] = is_seasonal\n df['is_tou_rate'] = is_tou\n\n if industry == 'residential':\n self.res_rate_data = df\n \n elif industry == 'commercial':\n self.com_rate_data = df\n\n def generate_classification_tree_values(self, industry):\n \"\"\"\n Returns dictionary of branch name: number of rates for each branch\n in the rate structure classification tree.\n \"\"\"\n\n industry = industry.lower()\n if industry == 'residential':\n df = self.res_rate_data\n \n elif industry == 'commercial':\n df = self.com_rate_data\n \n else:\n raise ValueError(\"industry must be 'residential' or 'commercial'!\")\n\n class_tree_cnts = {}\n class_tree_cnts['demand'] = len(df[df.is_demand_rate==1])\n class_tree_cnts['no_demand'] = len(df[df.is_demand_rate==0])\n class_tree_cnts['demand/tier'] = len(df[(df.is_demand_rate==1)&(df.is_tier_rate==1)])\n class_tree_cnts['demand/fixed'] = len(df[(df.is_demand_rate==1)&(df.is_tier_rate==0)])\n class_tree_cnts['no_demand/tier'] = len(df[(df.is_demand_rate==0)&(df.is_tier_rate==1)])\n class_tree_cnts['no_demand/fixed'] = len(df[(df.is_demand_rate==0)&(df.is_tier_rate==0)])\n class_tree_cnts['demand/tier/seasonal'] = 
len(df[(df.is_demand_rate==1)&(df.is_tier_rate==1)&(df.is_seasonal_rate==1)])\n class_tree_cnts['demand/tier/no_seasonal'] = len(df[(df.is_demand_rate==1)&(df.is_tier_rate==1)&(df.is_seasonal_rate==0)])\n class_tree_cnts['demand/fixed/seasonal'] = len(df[(df.is_demand_rate==1)&(df.is_tier_rate==0)&(df.is_seasonal_rate==1)])\n class_tree_cnts['demand/fixed/no_seasonal'] = len(df[(df.is_demand_rate==1)&(df.is_tier_rate==0)&(df.is_seasonal_rate==0)])\n class_tree_cnts['no_demand/tier/seasonal'] = len(df[(df.is_demand_rate==0)&(df.is_tier_rate==1)&(df.is_seasonal_rate==1)])\n class_tree_cnts['no_demand/tier/no_seasonal'] = len(df[(df.is_demand_rate==0)&(df.is_tier_rate==1)&(df.is_seasonal_rate==0)])\n class_tree_cnts['no_demand/fixed/seasonal'] = len(df[(df.is_demand_rate==0)&(df.is_tier_rate==0)&(df.is_seasonal_rate==1)])\n class_tree_cnts['no_demand/fixed/no_seasonal'] = len(df[(df.is_demand_rate==0)&(df.is_tier_rate==0)&(df.is_seasonal_rate==0)])\n class_tree_cnts['demand/tier/seasonal/tou'] = len(df[(df.is_demand_rate==1)&(df.is_tier_rate==1)&(df.is_seasonal_rate==1)&(df.is_tou_rate==1)])\n class_tree_cnts['demand/tier/seasonal/no_tou'] = len(df[(df.is_demand_rate==1)&(df.is_tier_rate==1)&(df.is_seasonal_rate==1)&(df.is_tou_rate==0)])\n class_tree_cnts['demand/tier/no_seasonal/tou'] = len(df[(df.is_demand_rate==1)&(df.is_tier_rate==1)&(df.is_seasonal_rate==0)&(df.is_tou_rate==1)])\n class_tree_cnts['demand/tier/no_seasonal/no_tou'] = len(df[(df.is_demand_rate==1)&(df.is_tier_rate==1)&(df.is_seasonal_rate==0)&(df.is_tou_rate==0)])\n class_tree_cnts['demand/fixed/seasonal/tou'] = len(df[(df.is_demand_rate==1)&(df.is_tier_rate==0)&(df.is_seasonal_rate==1)&(df.is_tou_rate==1)])\n class_tree_cnts['demand/fixed/seasonal/no_tou'] = len(df[(df.is_demand_rate==1)&(df.is_tier_rate==0)&(df.is_seasonal_rate==1)&(df.is_tou_rate==0)])\n class_tree_cnts['demand/fixed/no_seasonal/tou'] = len(df[(df.is_demand_rate==1)&(df.is_tier_rate==0)&(df.is_seasonal_rate==0)&(df.is_tou_rate==1)])\n class_tree_cnts['demand/fixed/no_seasonal/no_tou'] = len(df[(df.is_demand_rate==1)&(df.is_tier_rate==0)&(df.is_seasonal_rate==0)&(df.is_tou_rate==0)])\n class_tree_cnts['no_demand/tier/seasonal/tou'] = len(df[(df.is_demand_rate==0)&(df.is_tier_rate==1)&(df.is_seasonal_rate==1)&(df.is_tou_rate==1)])\n class_tree_cnts['no_demand/tier/seasonal/no_tou'] = len(df[(df.is_demand_rate==0)&(df.is_tier_rate==1)&(df.is_seasonal_rate==1)&(df.is_tou_rate==0)])\n class_tree_cnts['no_demand/tier/no_seasonal/tou'] = len(df[(df.is_demand_rate==0)&(df.is_tier_rate==1)&(df.is_seasonal_rate==0)&(df.is_tou_rate==1)])\n class_tree_cnts['no_demand/tier/no_seasonal/no_tou'] = len(df[(df.is_demand_rate==0)&(df.is_tier_rate==1)&(df.is_seasonal_rate==0)&(df.is_tou_rate==0)])\n class_tree_cnts['no_demand/fixed/seasonal/tou'] = len(df[(df.is_demand_rate==0)&(df.is_tier_rate==0)&(df.is_seasonal_rate==1)&(df.is_tou_rate==1)])\n class_tree_cnts['no_demand/fixed/seasonal/no_tou'] = len(df[(df.is_demand_rate==0)&(df.is_tier_rate==0)&(df.is_seasonal_rate==1)&(df.is_tou_rate==0)])\n class_tree_cnts['no_demand/fixed/no_seasonal/tou'] = len(df[(df.is_demand_rate==0)&(df.is_tier_rate==0)&(df.is_seasonal_rate==0)&(df.is_tou_rate==1)])\n class_tree_cnts['no_demand/fixed/no_seasonal/no_tou'] = len(df[(df.is_demand_rate==0)&(df.is_tier_rate==0)&(df.is_seasonal_rate==0)&(df.is_tou_rate==0)])\n\n return class_tree_cnts\n \n def filter_demand_rates(self, industry):\n \"\"\"\n Filters rates w/ demand charges.\n \"\"\"\n\n industry = industry.lower()\n if industry == 
'residential':\n df = self.res_rate_data\n \n elif industry == 'commercial':\n df = self.com_rate_data\n \n else:\n raise ValueError(\"industry must be 'residential' or 'commercial'!\")\n\n df = df[df.is_demand_rate==0]\n\n if industry == 'residential':\n self.res_rate_data = df\n \n elif industry == 'commercial':\n self.com_rate_data = df\n\n def filter_on_phrases(self, industry, filters_path='filters/'):\n \"\"\"Filters rates on lists of filter phrases: \n filters/urdb_res_filters.txt for residential rates and\n filters/urdb_dcfc_filters.txt for commercial rates.\n \"\"\"\n industry = industry.lower()\n if industry == 'residential':\n df = self.res_rate_data\n filters_file = filters_path + 'urdb_res_filters.txt'\n \n elif industry == 'commercial':\n df = self.com_rate_data\n filters_file = filters_path + 'urdb_dcfc_filters.txt'\n \n else:\n raise ValueError(\"industry must be 'residential' or 'commercial'!\")\n\n with open(filters_file, 'r') as f:\n filters = f.read().splitlines()\n\n df = df[((df.name.apply(lambda x: helpers.contains_filter_phrases(x, filters)))==False)&\n ((df.description.apply(lambda x: helpers.contains_filter_phrases(x, filters)))==False)]\n\n if industry == 'residential':\n self.res_rate_data = df\n \n elif industry == 'commercial':\n self.com_rate_data = df\n\n def additional_com_rate_filters(self):\n \"\"\"\n Filters commercial rates missing critical fields for approximating the\n cost of electricity.\n \"\"\"\n\n df = self.com_rate_data\n \n # Filter rates that don't use kW, kWh\n df = df[(df.demandunits == 'kW')&\n (df.flatdemandunits == 'kW')&\n (df.demandunits == 'kW')]\n\n # Filter rates in $/day (fixed charge)\n df = df[df.fixedchargeunits != '$/day']\n\n # Filter rates w/ min voltages higher than 900 V\n df = df[(df.voltageminimum <= 900)|(df.voltageminimum.isnull())]\n\n # Filter rates w/ coincident demand structure (can't predict utility peak dmnd)\n df = df[df['coincidentratestructure/period0/tier0rate'].isnull()]\n\n # Filter rates w/o energy rate information\n df = df[~df['energyratestructure/period0/tier0rate'].isnull()]\n\n self.com_rate_data = df\n\n def com_rate_preprocessing(self):\n \"\"\"\n Standardizes units and reporting for commercial rates.\n \"\"\"\n\n df = self.com_rate_data\n\n # Set: fixed charge = 0 when fixed charge == NULL \n df['fixedchargefirstmeter'] = df['fixedchargefirstmeter'].fillna(0)\n df['fixedchargeunits'] = df['fixedchargeunits'].fillna('$/month')\n \n # Sun-func for converting to $/month\n def convert_to_dollars_per_month(units, charges):\n monthly_charges = []\n for unit, charge in zip(units, charges):\n \n if unit=='$/month':\n monthly_charges.append(charge)\n \n elif unit=='$/day':\n monthly_charges.append(charge*30)\n \n else:\n raise ValueError('\"{0}\" unit not recognized'.format(unit)) \n \n return monthly_charges\n\n df['fixedchargefirstmeter'] = convert_to_dollars_per_month(df['fixedchargeunits'], df['fixedchargefirstmeter'])\n df['fixedchargeunits'] = '$/month'\n\n # Min demand constraint = 0 when NULL\n df['peakkwcapacitymin'] = df['peakkwcapacitymin'].fillna(0)\n df['peakkwhusagemin'] = df['peakkwhusagemin'].fillna(0)\n\n # Max demand contraint = inf when NULL\n df['peakkwcapacitymax'] = df['peakkwcapacitymax'].fillna(np.inf)\n df['peakkwhusagemax'] = df['peakkwhusagemax'].fillna(np.inf)\n\n self.com_rate_data = df\n\n def combine_rates(self, industry):\n \"\"\"\n Adds 57 columns to self.res_rate_data and self.com_rate_data that are the \n sum of the base rate and adjusted rate.\n \"\"\"\n\n industry = 
industry.lower()\n if industry == 'residential':\n df = self.res_rate_data\n \n elif industry == 'commercial':\n df = self.com_rate_data\n \n else:\n raise ValueError(\"industry must be 'residential' or 'commercial'!\")\n\n #period 0 (11 tiers)\n df['energyrate/period0/tier0'] = df['energyratestructure/period0/tier0rate'] + df['energyratestructure/period0/tier0adj'].fillna(0)\n df['energyrate/period0/tier1'] = df['energyratestructure/period0/tier1rate'] + df['energyratestructure/period0/tier1adj'].fillna(0)\n df['energyrate/period0/tier2'] = df['energyratestructure/period0/tier2rate'] + df['energyratestructure/period0/tier2adj'].fillna(0)\n df['energyrate/period0/tier3'] = df['energyratestructure/period0/tier3rate'] + df['energyratestructure/period0/tier3adj'].fillna(0)\n df['energyrate/period0/tier4'] = df['energyratestructure/period0/tier4rate'] + df['energyratestructure/period0/tier4adj'].fillna(0)\n df['energyrate/period0/tier5'] = df['energyratestructure/period0/tier5rate'] + df['energyratestructure/period0/tier5adj'].fillna(0)\n df['energyrate/period0/tier6'] = df['energyratestructure/period0/tier6rate'] + df['energyratestructure/period0/tier6adj'].fillna(0)\n df['energyrate/period0/tier7'] = df['energyratestructure/period0/tier7rate'] + df['energyratestructure/period0/tier7adj'].fillna(0)\n df['energyrate/period0/tier8'] = df['energyratestructure/period0/tier8rate'] + df['energyratestructure/period0/tier8adj'].fillna(0)\n df['energyrate/period0/tier9'] = df['energyratestructure/period0/tier9rate'] + df['energyratestructure/period0/tier9adj'].fillna(0)\n df['energyrate/period0/tier10'] = df['energyratestructure/period0/tier10rate'] + df['energyratestructure/period0/tier10adj'].fillna(0)\n \n #period 1 (8 tiers)\n df['energyrate/period1/tier0'] = df['energyratestructure/period1/tier0rate'] + df['energyratestructure/period1/tier0adj'].fillna(0)\n df['energyrate/period1/tier1'] = df['energyratestructure/period1/tier1rate'] + df['energyratestructure/period1/tier1adj'].fillna(0)\n df['energyrate/period1/tier2'] = df['energyratestructure/period1/tier2rate'] + df['energyratestructure/period1/tier2adj'].fillna(0)\n df['energyrate/period1/tier3'] = df['energyratestructure/period1/tier3rate'] + df['energyratestructure/period1/tier3adj'].fillna(0)\n df['energyrate/period1/tier4'] = df['energyratestructure/period1/tier4rate'] + df['energyratestructure/period1/tier4adj'].fillna(0)\n df['energyrate/period1/tier5'] = df['energyratestructure/period1/tier5rate'] + df['energyratestructure/period1/tier5adj'].fillna(0)\n df['energyrate/period1/tier6'] = df['energyratestructure/period1/tier6rate'] + df['energyratestructure/period1/tier6adj'].fillna(0)\n df['energyrate/period1/tier7'] = df['energyratestructure/period1/tier7rate'] + df['energyratestructure/period1/tier7adj'].fillna(0)\n\n #period 2 (5 tiers)\n df['energyrate/period2/tier0'] = df['energyratestructure/period2/tier0rate'] + df['energyratestructure/period2/tier0adj'].fillna(0)\n df['energyrate/period2/tier1'] = df['energyratestructure/period2/tier1rate'] + df['energyratestructure/period2/tier1adj'].fillna(0)\n df['energyrate/period2/tier2'] = df['energyratestructure/period2/tier2rate'] + df['energyratestructure/period2/tier2adj'].fillna(0)\n df['energyrate/period2/tier3'] = df['energyratestructure/period2/tier3rate'] + df['energyratestructure/period2/tier3adj'].fillna(0)\n df['energyrate/period2/tier4'] = df['energyratestructure/period2/tier4rate'] + df['energyratestructure/period2/tier4adj'].fillna(0)\n\n #period 3 (5 tiers)\n 
df['energyrate/period3/tier0'] = df['energyratestructure/period3/tier0rate'] + df['energyratestructure/period3/tier0adj'].fillna(0)\n df['energyrate/period3/tier1'] = df['energyratestructure/period3/tier1rate'] + df['energyratestructure/period3/tier1adj'].fillna(0)\n df['energyrate/period3/tier2'] = df['energyratestructure/period3/tier2rate'] + df['energyratestructure/period3/tier2adj'].fillna(0)\n df['energyrate/period3/tier3'] = df['energyratestructure/period3/tier3rate'] + df['energyratestructure/period3/tier3adj'].fillna(0)\n df['energyrate/period3/tier4'] = df['energyratestructure/period3/tier4rate'] + df['energyratestructure/period3/tier4adj'].fillna(0)\n\n #period 4 (5 tiers)\n df['energyrate/period4/tier0'] = df['energyratestructure/period4/tier0rate'] + df['energyratestructure/period4/tier0adj'].fillna(0)\n df['energyrate/period4/tier1'] = df['energyratestructure/period4/tier1rate'] + df['energyratestructure/period4/tier1adj'].fillna(0)\n df['energyrate/period4/tier2'] = df['energyratestructure/period4/tier2rate'] + df['energyratestructure/period4/tier2adj'].fillna(0)\n df['energyrate/period4/tier3'] = df['energyratestructure/period4/tier3rate'] + df['energyratestructure/period4/tier3adj'].fillna(0)\n df['energyrate/period4/tier4'] = df['energyratestructure/period4/tier4rate'] + df['energyratestructure/period4/tier4adj'].fillna(0)\n\n #period 5 (5 tiers)\n df['energyrate/period5/tier0'] = df['energyratestructure/period5/tier0rate'] + df['energyratestructure/period5/tier0adj'].fillna(0)\n df['energyrate/period5/tier1'] = df['energyratestructure/period5/tier1rate'] + df['energyratestructure/period5/tier1adj'].fillna(0)\n df['energyrate/period5/tier2'] = df['energyratestructure/period5/tier2rate'] + df['energyratestructure/period5/tier2adj'].fillna(0)\n df['energyrate/period5/tier3'] = df['energyratestructure/period5/tier3rate'] + df['energyratestructure/period5/tier3adj'].fillna(0)\n df['energyrate/period5/tier4'] = df['energyratestructure/period5/tier4rate'] + df['energyratestructure/period5/tier4adj'].fillna(0)\n\n #period 6-23\n df['energyrate/period6/tier0'] = df['energyratestructure/period6/tier0rate'] + df['energyratestructure/period6/tier0adj'].fillna(0)\n df['energyrate/period7/tier0'] = df['energyratestructure/period7/tier0rate'] + df['energyratestructure/period7/tier0adj'].fillna(0)\n df['energyrate/period8/tier0'] = df['energyratestructure/period8/tier0rate'] + df['energyratestructure/period8/tier0adj'].fillna(0)\n df['energyrate/period9/tier0'] = df['energyratestructure/period9/tier0rate'] + df['energyratestructure/period9/tier0adj'].fillna(0)\n df['energyrate/period10/tier0'] = df['energyratestructure/period10/tier0rate'] + df['energyratestructure/period10/tier0adj'].fillna(0)\n df['energyrate/period11/tier0'] = df['energyratestructure/period11/tier0rate'] + df['energyratestructure/period11/tier0adj'].fillna(0)\n df['energyrate/period12/tier0'] = df['energyratestructure/period12/tier0rate'] + df['energyratestructure/period12/tier0adj'].fillna(0)\n df['energyrate/period13/tier0'] = df['energyratestructure/period13/tier0rate'] + df['energyratestructure/period13/tier0adj'].fillna(0)\n df['energyrate/period14/tier0'] = df['energyratestructure/period14/tier0rate'] + df['energyratestructure/period14/tier0adj'].fillna(0)\n df['energyrate/period15/tier0'] = df['energyratestructure/period15/tier0rate'] + df['energyratestructure/period15/tier0adj'].fillna(0)\n df['energyrate/period16/tier0'] = df['energyratestructure/period16/tier0rate'] + 
df['energyratestructure/period16/tier0adj'].fillna(0)\n df['energyrate/period17/tier0'] = df['energyratestructure/period17/tier0rate'] + df['energyratestructure/period17/tier0adj'].fillna(0)\n df['energyrate/period18/tier0'] = df['energyratestructure/period18/tier0rate'] + df['energyratestructure/period18/tier0adj'].fillna(0)\n df['energyrate/period19/tier0'] = df['energyratestructure/period19/tier0rate'] + df['energyratestructure/period19/tier0adj'].fillna(0)\n df['energyrate/period20/tier0'] = df['energyratestructure/period20/tier0rate'] + df['energyratestructure/period20/tier0adj'].fillna(0)\n df['energyrate/period21/tier0'] = df['energyratestructure/period21/tier0rate'] + df['energyratestructure/period21/tier0adj'].fillna(0)\n df['energyrate/period22/tier0'] = df['energyratestructure/period22/tier0rate'] + df['energyratestructure/period22/tier0adj'].fillna(0)\n df['energyrate/period23/tier0'] = df['energyratestructure/period23/tier0rate'] + df['energyratestructure/period23/tier0adj'].fillna(0)\n\n if industry == 'residential':\n self.res_rate_data = df\n \n elif industry == 'commercial':\n self.com_rate_data = df\n\n def filter_null_rates(self, industry):\n \"\"\"\n Filters rates with no cost information.\n \"\"\"\n\n industry = industry.lower()\n if industry == 'residential':\n df = self.res_rate_data\n \n elif industry == 'commercial':\n df = self.com_rate_data\n \n else:\n raise ValueError(\"industry must be 'residential' or 'commercial'!\")\n\n df = df.dropna(subset=['energyrate/period0/tier0'])\n\n if industry == 'residential':\n self.res_rate_data = df\n \n elif industry == 'commercial':\n self.com_rate_data = df\n\n def calculate_annual_energy_cost_residential(self, outpath='outputs/cost-of-electricity/urdb-res-rates/'):\n \"\"\"\n Calculates the annualized energy costs for residential rates. Estimates \n account for seasonal, tier, and TOU rate structures. Key assumptions \n include: 1) Charging occurs with the same freqency irregardless of \n weekday vs. weekend or season (time of year); 2) Charging occurs with \n the same frequency across rate tiers; 3) For TOU rates, charging will \n always occur when it is cheapest to do so (off-peak). Adds \n 'electricity_cost_per_kwh' col to self.res_rate_data.\n \"\"\"\n\n # Fixed Rates - incl. seasonal & TOU\n res_rates_fixed = self.res_rate_data[self.res_rate_data.is_tier_rate==0]\n avg_costs = []\n for i in range(len(res_rates_fixed)):\n month_rates = []\n \n #weekday\n for month in [ls.replace('[', '').replace(',', '').replace(' ', '') for ls in str(res_rates_fixed.iloc[i]['energyweekdayschedule']).split(']')][:-2]: #seasonal\n periods = (list(set(month)))\n day_rates = []\n \n for per in periods: #TOU\n rate_str = 'energyrate/period{}/tier0'.format(per)\n rate = res_rates_fixed.iloc[i][rate_str]\n day_rates.append(rate)\n\n min_day_rate = min(np.array(day_rates)) \n month_rates.extend([min_day_rate]*5)\n\n #weekend\n for month in [ls.replace('[', '').replace(',', '').replace(' ', '') for ls in str(res_rates_fixed.iloc[i]['energyweekendschedule']).split(']')][:-2]: #seasonal\n periods = (list(set(month)))\n day_rates = []\n \n for per in periods: #TOU\n rate_str = 'energyrate/period{}/tier0'.format(per)\n rate = res_rates_fixed.iloc[i][rate_str]\n day_rates.append(rate)\n\n min_day_rate = min(np.array(day_rates)) \n month_rates.extend([min_day_rate]*2)\n\n avg_cost = np.array(month_rates).mean() #dow-weighted cost\n avg_costs.append(avg_cost)\n \n res_rates_fixed['electricity_cost_per_kwh'] = avg_costs\n\n # Tier Rates - incl. 
seasonal & TOU\n res_rates_tier = self.res_rate_data[self.res_rate_data.is_tier_rate==1]\n avg_costs = []\n for i in range(len(res_rates_tier)): #tier rate = avg of all tiers\n avg_tier_rates = []\n avg_tier_month_rates = []\n for p in range(24):\n if p==0:\n tier_rates = []\n for t in range(11):\n rate_str = 'energyrate/period{0}/tier{1}'.format(p,t)\n rate = res_rates_tier.iloc[i][rate_str]\n tier_rates.append(rate)\n \n with warnings.catch_warnings(): #supress warnings\n warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n avg_tier_rate = np.nanmean(np.array(tier_rates)) \n \n avg_tier_rates.append(avg_tier_rate)\n\n elif p==1:\n tier_rates = []\n for t in range(8):\n rate_str = 'energyrate/period{0}/tier{1}'.format(p,t)\n rate = res_rates_tier.iloc[i][rate_str]\n tier_rates.append(rate)\n \n with warnings.catch_warnings(): #supress warnings\n warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n avg_tier_rate = np.nanmean(np.array(tier_rates))\n \n avg_tier_rates.append(avg_tier_rate)\n\n elif p>=2 and p<6:\n tier_rates = []\n for t in range(5):\n rate_str = 'energyrate/period{0}/tier{1}'.format(p,t)\n rate = res_rates_tier.iloc[i][rate_str]\n tier_rates.append(rate)\n \n with warnings.catch_warnings(): #supress warnings\n warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n avg_tier_rate = np.nanmean(np.array(tier_rates))\n\n avg_tier_rates.append(avg_tier_rate)\n\n else:\n rate_str = 'energyrate/period{0}/tier0'.format(p)\n rate = res_rates_tier.iloc[i][rate_str]\n avg_tier_rates.append(rate)\n \n\n #weekday rates\n months = [ls.replace('[', '').replace(',', '').replace(' ', '') for ls in str(res_rates_tier.iloc[i]['energyweekdayschedule']).split(']')][:-2] \n for month in months: #seasonal\n periods = (list(set(month)))\n avg_rates = []\n for per in periods: #TOU\n per = int(per)\n avg_tier_rate = avg_tier_rates[per]\n avg_rates.append(avg_tier_rate)\n\n min_avg_tier_day_rate = min(np.array(avg_rates))\n avg_tier_month_rates.extend([min_avg_tier_day_rate]*5)\n\n #weekend rates\n months = [ls.replace('[', '').replace(',', '').replace(' ', '') for ls in str(res_rates_tier.iloc[i]['energyweekendschedule']).split(']')][:-2]\n for month in months:\n periods = (list(set(month)))\n avg_rates = []\n for per in periods:\n per = int(per)\n avg_tier_rate = avg_tier_rates[per]\n avg_rates.append(avg_tier_rate)\n\n min_avg_tier_day_rate = min(np.array(avg_rates))\n avg_tier_month_rates.extend([min_avg_tier_day_rate]*2)\n \n avg_cost = np.array(avg_tier_month_rates).mean() #dow-weighted cost\n avg_costs.append(avg_cost)\n\n res_rates_tier['electricity_cost_per_kwh'] = avg_costs\n res_df = pd.concat([res_rates_fixed, res_rates_tier], sort=False)\n res_df = res_df[res_df.electricity_cost_per_kwh>=0] #remove negative rates\n self.res_rate_data = res_df\n \n self.res_rate_data.to_csv(outpath+'res_rates.csv', index=False)\n print(\"Complete, {} rates included.\".format(len(self.res_rate_data)))\n\n\n def calculate_annual_cost_dcfc(self, \n dcfc_load_profiles = config.DCFC_PROFILES_DICT,\n outpath = 'outputs/cost-of-electricity/urdb-dcfc-rates/',\n log_lvl = 1):\n \"\"\"\n Calculates the annualized average daily cost to charge for \n commercial rates under an annual dcfc_load_profile. Estimates account \n for demand, seasonal, tier, and TOU rate structures. Due to it's\n significant runtime, function outputs a .csv at outpath for each profile \n in dcfc_load_profiles. 
The log_lvl parameter must be in [0,1,2] where higher\n levels reflect more verbose logs.\n \"\"\"\n \n assert log_lvl in [0,1,2], \"Unexpected log_lvl, must be in [0,1,2]\"\n \n if log_lvl == 0:\n log_lbl = logging.WARNING\n \n elif log_lvl == 1:\n log_lbl = logging.INFO\n \n elif log_lvl == 2:\n log_lbl = logging.DEBUG\n \n logging.basicConfig(level=log_lbl)\n \n for p in dcfc_load_profiles.keys():\n # Load profile\n profile_path = dcfc_load_profiles[p]\n profile_df = pd.read_csv(profile_path, index_col=0, parse_dates=True)\n \n # Deconstruct timestamp\n months = profile_df.index.month\n days = profile_df.index.day\n hours = profile_df.index.hour\n minutes = profile_df.index.minute\n weekday = profile_df.index.weekday\n\n # Convert load profile -> energy profile\n energy_profile_df = pd.DataFrame({'month': months, \n 'day': days, \n 'hour': hours, \n 'minute': minutes, \n 'weekday': weekday, \n 'pwr_kw': profile_df['Power, kW']})\n energy_profile_df = energy_profile_df.sort_values(by=['month', 'day', 'hour', 'minute'])\n energy_profile_df = energy_profile_df.reset_index()\n energy_profile_df['energy_kwh'] = energy_profile_df['pwr_kw']/4\n\n # Aggregate 15-min energy profile -> hourly energy profile\n hourly_energy_df = energy_profile_df.groupby(['month', 'day', 'hour', 'weekday'])['energy_kwh'].sum()\n hourly_energy_df = hourly_energy_df.reset_index()\n\n # Aggregate hourly energy profile -> monthly energy profile\n monthly_energy_df = hourly_energy_df.groupby('month')['energy_kwh'].sum()\n monthly_energy_df = monthly_energy_df.reset_index()\n \n # Calculate peak power by month\n monthly_peak_pwr_df = energy_profile_df.groupby('month')['pwr_kw'].max()\n monthly_peak_pwr_df = monthly_peak_pwr_df.reset_index()\n\n # Calculate annual energy\n annual_energy_kwh = monthly_energy_df['energy_kwh'].sum()\n\n # Determine times of peak demand\n peak_demand_times = []\n for month, peak_pwr_kw in zip(range(1,13), monthly_peak_pwr_df['pwr_kw']):\n peak_demand_dow = energy_profile_df[(energy_profile_df.month==month)&\\\n (energy_profile_df.pwr_kw==peak_pwr_kw)]['weekday'].values[0]\n peak_demand_hod = energy_profile_df[(energy_profile_df.month==month)&\\\n (energy_profile_df.pwr_kw==peak_pwr_kw)]['hour'].values[0]\n peak_demand_time = (peak_demand_dow, peak_demand_hod)\n peak_demand_times.append(peak_demand_time)\n\n # Filter ineligible rates by peak capacity, energy consumption limits\n def is_eligible(rates, monthly_energy, monthly_peak_pwr):\n eligible = ((rates['peakkwcapacitymin'] <= monthly_peak_pwr.min())&\n (rates['peakkwcapacitymax'] >= monthly_peak_pwr.max())&\n (rates['peakkwhusagemin'] <= monthly_energy.min())&\n (rates['peakkwhusagemax'] >= monthly_energy.max()))\n return eligible\n\n eligibility = is_eligible(self.com_rate_data, monthly_energy_df['energy_kwh'], monthly_peak_pwr_df['pwr_kw'])\n\n self.com_rate_data['eligible'] = eligibility\n eligible_rates = self.com_rate_data[self.com_rate_data.eligible==True]\n print_str = \"\"\"rates determined to be ineligible for {} (violated peak capacity/energy consumption constraints)\"\"\".format(p)\n logging.info(len(self.com_rate_data[self.com_rate_data.eligible==False]), print_str)\n\n ### ###\n ## Calculate cost of electricity ##\n ### ###\n\n # Energy rates == 0 if NULL; Max = inf if NULL\n for tier in range(11):\n maxim = 'energyratestructure/period0/tier{}max'.format(tier)\n rate = 'energyratestructure/period0/tier{}rate'.format(tier)\n adj = 'energyratestructure/period0/tier{}adj'.format(tier)\n eligible_rates[maxim] = 
eligible_rates[maxim].fillna(np.inf)\n eligible_rates[rate] = eligible_rates[rate].fillna(0)\n eligible_rates[adj] = eligible_rates[adj].fillna(0)\n \n for tier in range(8):\n maxim = 'energyratestructure/period1/tier{}max'.format(tier)\n rate = 'energyratestructure/period1/tier{}rate'.format(tier)\n adj = 'energyratestructure/period1/tier{}adj'.format(tier)\n eligible_rates[maxim] = eligible_rates[maxim].fillna(np.inf)\n eligible_rates[rate] = eligible_rates[rate].fillna(0)\n eligible_rates[adj] = eligible_rates[adj].fillna(0)\n\n for period in range(2,6):\n for tier in range(5):\n maxim = 'energyratestructure/period{0}/tier{1}max'.format(period, tier)\n rate = 'energyratestructure/period{0}/tier{1}rate'.format(period, tier)\n adj = 'energyratestructure/period{0}/tier{1}adj'.format(period, tier)\n eligible_rates[maxim] = eligible_rates[maxim].fillna(np.inf)\n eligible_rates[rate] = eligible_rates[rate].fillna(0)\n eligible_rates[adj] = eligible_rates[adj].fillna(0)\n \n for period in range(6,24):\n maxim = 'energyratestructure/period{}/tier0max'.format(period)\n rate = 'energyratestructure/period{}/tier0rate'.format(period)\n adj = 'energyratestructure/period{}/tier0adj'.format(period)\n eligible_rates[maxim] = eligible_rates[maxim].fillna(np.inf)\n eligible_rates[rate] = eligible_rates[rate].fillna(0)\n eligible_rates[adj] = eligible_rates[adj].fillna(0)\n\n # Calculate annual fixed cost charge (1st meter)\n logging.info(\"Starting annual fixed cost calculations for {}...\".format(p))\n eligible_rates['annual_fixed_cost'] = eligible_rates['fixedchargefirstmeter'] * 12\n eligible_rates = eligible_rates[eligible_rates.annual_fixed_cost >= 0]\n logging.info(\"Annual fixed cost calculations complete.\")\n\n # Characterize rates (demand/no-demand)\n flat_dmd_rates = eligible_rates[~eligible_rates['flatdemandstructure/period0/tier0rate'].isnull()]\n flat_dmd_rates['demand_type'] = 'flat'\n\n tou_dmd_rates = eligible_rates[(eligible_rates['flatdemandstructure/period0/tier0rate'].isnull())&\n (~eligible_rates['demandratestructure/period0/tier0rate'].isnull())]\n tou_dmd_rates['demand_type'] = 'tou'\n\n no_dmd_rates = eligible_rates[(eligible_rates['flatdemandstructure/period0/tier0rate'].isnull())&\n (eligible_rates['demandratestructure/period0/tier0rate'].isnull())]\n no_dmd_rates['demand_type'] = 'none'\n\n # Demand Charge Rates = 0 when NULL; max = inf when NULL\n for tier in range(17):\n maxim = 'flatdemandstructure/period0/tier{}max'.format(tier)\n rate = 'flatdemandstructure/period0/tier{}rate'.format(tier)\n adj = 'flatdemandstructure/period0/tier{}adj'.format(tier)\n flat_dmd_rates[maxim] = flat_dmd_rates[maxim].fillna(np.inf)\n tou_dmd_rates[maxim] = tou_dmd_rates[maxim].fillna(np.inf)\n no_dmd_rates[maxim] = no_dmd_rates[maxim].fillna(np.inf)\n flat_dmd_rates[rate] = flat_dmd_rates[rate].fillna(0)\n tou_dmd_rates[rate] = tou_dmd_rates[rate].fillna(0)\n no_dmd_rates[rate] = no_dmd_rates[rate].fillna(0)\n flat_dmd_rates[adj] = flat_dmd_rates[adj].fillna(0)\n tou_dmd_rates[adj] = tou_dmd_rates[adj].fillna(0)\n no_dmd_rates[adj] = no_dmd_rates[adj].fillna(0)\n \n for tier in range(5):\n maxim = 'flatdemandstructure/period1/tier{}max'.format(tier)\n rate = 'flatdemandstructure/period1/tier{}rate'.format(tier)\n adj = 'flatdemandstructure/period1/tier{}adj'.format(tier)\n flat_dmd_rates[maxim] = flat_dmd_rates[maxim].fillna(np.inf)\n tou_dmd_rates[maxim] = tou_dmd_rates[maxim].fillna(np.inf)\n no_dmd_rates[maxim] = no_dmd_rates[maxim].fillna(np.inf)\n flat_dmd_rates[rate] = 
flat_dmd_rates[rate].fillna(0)\n tou_dmd_rates[rate] = tou_dmd_rates[rate].fillna(0)\n no_dmd_rates[rate] = no_dmd_rates[rate].fillna(0)\n flat_dmd_rates[adj] = flat_dmd_rates[adj].fillna(0)\n tou_dmd_rates[adj] = tou_dmd_rates[adj].fillna(0)\n no_dmd_rates[adj] = no_dmd_rates[adj].fillna(0)\n \n for tier in range(3):\n maxim = 'flatdemandstructure/period2/tier{}max'.format(tier)\n rate = 'flatdemandstructure/period2/tier{}rate'.format(tier)\n adj = 'flatdemandstructure/period2/tier{}adj'.format(tier)\n flat_dmd_rates[maxim] = flat_dmd_rates[maxim].fillna(np.inf)\n tou_dmd_rates[maxim] = tou_dmd_rates[maxim].fillna(np.inf)\n no_dmd_rates[maxim] = no_dmd_rates[maxim].fillna(np.inf)\n flat_dmd_rates[rate] = flat_dmd_rates[rate].fillna(0)\n tou_dmd_rates[rate] = tou_dmd_rates[rate].fillna(0)\n no_dmd_rates[rate] = no_dmd_rates[rate].fillna(0)\n flat_dmd_rates[adj] = flat_dmd_rates[adj].fillna(0)\n tou_dmd_rates[adj] = tou_dmd_rates[adj].fillna(0)\n no_dmd_rates[adj] = no_dmd_rates[adj].fillna(0)\n \n for period in range(3,8):\n maxim = 'flatdemandstructure/period{}/tier0max'.format(period)\n rate = 'flatdemandstructure/period{}/tier0rate'.format(period)\n adj = 'flatdemandstructure/period{}/tier0adj'.format(period)\n flat_dmd_rates[maxim] = flat_dmd_rates[maxim].fillna(np.inf)\n tou_dmd_rates[maxim] = tou_dmd_rates[maxim].fillna(np.inf)\n no_dmd_rates[maxim] = no_dmd_rates[maxim].fillna(np.inf)\n flat_dmd_rates[rate] = flat_dmd_rates[rate].fillna(0)\n tou_dmd_rates[rate] = tou_dmd_rates[rate].fillna(0)\n no_dmd_rates[rate] = no_dmd_rates[rate].fillna(0)\n flat_dmd_rates[adj] = flat_dmd_rates[adj].fillna(0)\n tou_dmd_rates[adj] = tou_dmd_rates[adj].fillna(0)\n no_dmd_rates[adj] = no_dmd_rates[adj].fillna(0)\n \n for period in range(2):\n for tier in range(16):\n maxim = 'demandratestructure/period{0}/tier{1}max'.format(period, tier)\n rate = 'demandratestructure/period{0}/tier{1}rate'.format(period, tier)\n adj = 'demandratestructure/period{0}/tier{1}adj'.format(period, tier)\n flat_dmd_rates[maxim] = flat_dmd_rates[maxim].fillna(np.inf)\n tou_dmd_rates[maxim] = tou_dmd_rates[maxim].fillna(np.inf)\n no_dmd_rates[maxim] = no_dmd_rates[maxim].fillna(np.inf)\n flat_dmd_rates[rate] = flat_dmd_rates[rate].fillna(0)\n tou_dmd_rates[rate] = tou_dmd_rates[rate].fillna(0)\n no_dmd_rates[rate] = no_dmd_rates[rate].fillna(0)\n flat_dmd_rates[adj] = flat_dmd_rates[adj].fillna(0)\n tou_dmd_rates[adj] = tou_dmd_rates[adj].fillna(0)\n no_dmd_rates[adj] = no_dmd_rates[adj].fillna(0)\n\n for period in range(2, 4):\n for tier in range(3):\n maxim = 'demandratestructure/period{0}/tier{1}max'.format(period, tier)\n rate = 'demandratestructure/period{0}/tier{1}rate'.format(period, tier)\n adj = 'demandratestructure/period{0}/tier{1}adj'.format(period, tier)\n flat_dmd_rates[maxim] = flat_dmd_rates[maxim].fillna(np.inf)\n tou_dmd_rates[maxim] = tou_dmd_rates[maxim].fillna(np.inf)\n no_dmd_rates[maxim] = no_dmd_rates[maxim].fillna(np.inf)\n flat_dmd_rates[rate] = flat_dmd_rates[rate].fillna(0)\n tou_dmd_rates[rate] = tou_dmd_rates[rate].fillna(0)\n no_dmd_rates[rate] = no_dmd_rates[rate].fillna(0)\n flat_dmd_rates[adj] = flat_dmd_rates[adj].fillna(0)\n tou_dmd_rates[adj] = tou_dmd_rates[adj].fillna(0)\n no_dmd_rates[adj] = no_dmd_rates[adj].fillna(0)\n\n for tier in range(2):\n maxim = 'demandratestructure/period4/tier{}max'.format(tier)\n rate = 'demandratestructure/period4/tier{}rate'.format(tier)\n adj = 'demandratestructure/period4/tier{}adj'.format(tier)\n flat_dmd_rates[maxim] = 
flat_dmd_rates[maxim].fillna(np.inf)\n tou_dmd_rates[maxim] = tou_dmd_rates[maxim].fillna(np.inf)\n no_dmd_rates[maxim] = no_dmd_rates[maxim].fillna(np.inf)\n flat_dmd_rates[rate] = flat_dmd_rates[rate].fillna(0)\n tou_dmd_rates[rate] = tou_dmd_rates[rate].fillna(0)\n no_dmd_rates[rate] = no_dmd_rates[rate].fillna(0)\n flat_dmd_rates[adj] = flat_dmd_rates[adj].fillna(0)\n tou_dmd_rates[adj] = tou_dmd_rates[adj].fillna(0)\n no_dmd_rates[adj] = no_dmd_rates[adj].fillna(0)\n \n for period in range(5,9):\n maxim = 'demandratestructure/period{}/tier0max'.format(period)\n rate = 'demandratestructure/period{}/tier0rate'.format(period)\n adj = 'demandratestructure/period{}/tier0adj'.format(period)\n flat_dmd_rates[maxim] = flat_dmd_rates[maxim].fillna(np.inf)\n tou_dmd_rates[maxim] = tou_dmd_rates[maxim].fillna(np.inf)\n no_dmd_rates[maxim] = no_dmd_rates[maxim].fillna(np.inf)\n flat_dmd_rates[rate] = flat_dmd_rates[rate].fillna(0)\n tou_dmd_rates[rate] = tou_dmd_rates[rate].fillna(0)\n no_dmd_rates[rate] = no_dmd_rates[rate].fillna(0)\n flat_dmd_rates[adj] = flat_dmd_rates[adj].fillna(0)\n tou_dmd_rates[adj] = tou_dmd_rates[adj].fillna(0)\n no_dmd_rates[adj] = no_dmd_rates[adj].fillna(0)\n\n # Calculate annual demand charges\n logging.info(\"Starting annual demand cost calculations for {}...\".format(p))\n ## Flat-demand rates\n annual_dmd_charges = []\n for i in range(len(flat_dmd_rates)):\n periods = []\n for month in range(1,13):\n flat_dmd_mnth = \"flatdemandmonth{}\".format(month)\n period = int(flat_dmd_rates.iloc[i][flat_dmd_mnth])\n periods.append(period)\n\n annual_dmd_charge = 0\n for month, period in zip(monthly_peak_pwr_df['month'], periods):\n peak_pwr = monthly_peak_pwr_df[monthly_peak_pwr_df.month==month]['pwr_kw'].values[0]\n if period == 0:\n for tier in range(17):\n maxim = 'flatdemandstructure/period{0}/tier{1}max'.format(period, tier)\n rate = 'flatdemandstructure/period{0}/tier{1}rate'.format(period, tier)\n adj = 'flatdemandstructure/period{0}/tier{1}adj'.format(period, tier)\n dmd_rate = flat_dmd_rates.iloc[i][rate] + flat_dmd_rates.iloc[i][adj]\n dmd_charge = peak_pwr * dmd_rate\n if peak_pwr <= flat_dmd_rates.iloc[i][maxim]:\n annual_dmd_charge += dmd_charge\n break\n else:\n continue \n \n elif period == 1:\n for tier in range(5):\n maxim = 'flatdemandstructure/period{0}/tier{1}max'.format(period, tier)\n rate = 'flatdemandstructure/period{0}/tier{1}rate'.format(period, tier)\n adj = 'flatdemandstructure/period{0}/tier{1}adj'.format(period, tier)\n dmd_rate = flat_dmd_rates.iloc[i][rate] + flat_dmd_rates.iloc[i][adj]\n dmd_charge = peak_pwr * dmd_rate\n if peak_pwr <= flat_dmd_rates.iloc[i][maxim]:\n annual_dmd_charge += dmd_charge\n break\n else:\n continue\n \n elif period == 2:\n for tier in range(3):\n maxim = 'flatdemandstructure/period{0}/tier{1}max'.format(period, tier)\n rate = 'flatdemandstructure/period{0}/tier{1}rate'.format(period, tier)\n adj = 'flatdemandstructure/period{0}/tier{1}adj'.format(period, tier)\n dmd_rate = flat_dmd_rates.iloc[i][rate] + flat_dmd_rates.iloc[i][adj]\n dmd_charge = peak_pwr * dmd_rate\n if peak_pwr <= flat_dmd_rates.iloc[i][maxim]:\n annual_dmd_charge += dmd_charge\n break\n else:\n continue\n \n else:\n maxim = 'flatdemandstructure/period{0}/tier0max'.format(period)\n rate = 'flatdemandstructure/period{0}/tier0rate'.format(period)\n adj = 'flatdemandstructure/period{0}/tier0adj'.format(period)\n dmd_rate = flat_dmd_rates.iloc[i][rate] + flat_dmd_rates.iloc[i][adj]\n dmd_charge = peak_pwr * dmd_rate\n annual_dmd_charge += 
dmd_charge\n\n annual_dmd_charges.append(annual_dmd_charge)\n \n flat_dmd_rates['annual_demand_cost'] = annual_dmd_charges\n flat_dmd_rates = flat_dmd_rates[flat_dmd_rates.annual_demand_cost>=0] #remove negative demand costs\n\n ## TOU-demand rates\n annual_dmd_charges = []\n for i in range(len(tou_dmd_rates)):\n wkday_dmd_tou_periods = []\n wknd_dmd_tou_periods = []\n for monthly_wkday_dmd_tou in tou_dmd_rates['demandweekdayschedule'].iloc[i].replace('L','').replace('[', '').split(']')[:-2]:\n wkday_dmd_tou_period = monthly_wkday_dmd_tou.replace(' ','').split(',')\n if len(wkday_dmd_tou_period) == 24:\n wkday_dmd_tou_periods.append(wkday_dmd_tou_period)\n \n elif len(wkday_dmd_tou_period) == 25:\n wkday_dmd_tou_periods.append(wkday_dmd_tou_period[1:])\n\n for monthly_wknd_dmd_tou in tou_dmd_rates['demandweekendschedule'].iloc[i].replace('L','').replace('[', '').split(']')[:-2]:\n wknd_dmd_tou_period = monthly_wknd_dmd_tou.replace(' ','').split(',')\n if len(wknd_dmd_tou_period) == 24:\n wknd_dmd_tou_periods.append(wknd_dmd_tou_period)\n \n elif len(wknd_dmd_tou_period) == 25:\n wknd_dmd_tou_periods.append(wknd_dmd_tou_period[1:])\n\n periods = []\n for month_idx, peak_time in enumerate(peak_demand_times):\n dow = peak_time[0]\n hr = peak_time[1]\n\n if dow < 5:\n periods.append(wkday_dmd_tou_periods[month_idx][hr])\n \n elif dow in [5,6]:\n periods.append(wknd_dmd_tou_periods[month_idx][hr])\n\n annual_dmd_charge = 0\n for month, period in zip(monthly_peak_pwr_df['month'], periods):\n peak_pwr = monthly_peak_pwr_df[monthly_peak_pwr_df.month==month]['pwr_kw'].values[0]\n \n if period in [0,1]:\n for tier in range(16):\n maxim = 'demandratestructure/period{0}/tier{1}max'.format(period, tier)\n rate = 'demandratestructure/period{0}/tier{1}rate'.format(period, tier)\n adj = 'demandratestructure/period{0}/tier{1}adj'.format(period, tier)\n dmd_rate = tou_dmd_rates.iloc[i][rate] + tou_dmd_rates.iloc[i][adj]\n dmd_charge = peak_pwr * dmd_rate\n if peak_pwr <= tou_dmd_rates.iloc[i][maxim]:\n annual_dmd_charge += dmd_charge\n break\n else:\n continue \n \n elif period in [2,3]:\n for tier in range(3):\n maxim = 'demandratestructure/period{0}/tier{1}max'.format(period, tier)\n rate = 'demandratestructure/period{0}/tier{1}rate'.format(period, tier)\n adj = 'demandratestructure/period{0}/tier{1}adj'.format(period, tier)\n dmd_rate = tou_dmd_rates.iloc[i][rate] + tou_dmd_rates.iloc[i][adj]\n dmd_charge = peak_pwr * dmd_rate\n if peak_pwr <= tou_dmd_rates.iloc[i][maxim]:\n annual_dmd_charge += dmd_charge\n break\n else:\n continue \n \n elif period == 4:\n for tier in range(2):\n maxim = 'demandratestructure/period{0}/tier{1}max'.format(period, tier)\n rate = 'demandratestructure/period{0}/tier{1}rate'.format(period, tier)\n adj = 'demandratestructure/period{0}/tier{1}adj'.format(period, tier)\n dmd_rate = tou_dmd_rates.iloc[i][rate] + tou_dmd_rates.iloc[i][adj]\n dmd_charge = peak_pwr * dmd_rate\n if peak_pwr <= tou_dmd_rates.iloc[i][maxim]:\n annual_dmd_charge += dmd_charge\n break\n else:\n continue \n \n else:\n maxim = 'demandratestructure/period{0}/tier0max'.format(period)\n rate = 'demandratestructure/period{0}/tier0rate'.format(period)\n adj = 'demandratestructure/period{0}/tier0adj'.format(period)\n dmd_rate = tou_dmd_rates.iloc[i][rate] + tou_dmd_rates.iloc[i][adj]\n dmd_charge = peak_pwr * dmd_rate\n annual_dmd_charge += dmd_charge\n \n annual_dmd_charges.append(annual_dmd_charge)\n\n tou_dmd_rates['annual_demand_cost'] = annual_dmd_charges\n tou_dmd_rates = 
tou_dmd_rates[tou_dmd_rates.annual_demand_cost>=0] #remove negative demand costs\n\n ## No-demand rates\n no_dmd_rates['annual_demand_cost'] = 0\n logging.info(\"Annual demand cost calculations complete.\")\n\n eligible_rates = pd.concat([flat_dmd_rates, tou_dmd_rates, no_dmd_rates])\n\n # Calculate annual energy charges\n logging.info(\"Starting annual energy cost calculations for {0} ({1} total)...\".format(p, len(eligible_rates)))\n annual_energy_costs = []\n for i in range(len(eligible_rates)):\n if (i % 10 == 0) and (i!=0):\n logging.info(\"{0}/{1} rates completed\".format(i, len(eligible_rates)))\n wkday_tou_periods = []\n wknd_tou_periods = []\n for monthly_wkday_tou in eligible_rates['energyweekdayschedule'].iloc[i].replace('L','').replace('[', '').split(']')[:-2]: \n wkday_tou_period = monthly_wkday_tou.replace(' ','').split(',')\n if len(wkday_tou_period) == 24:\n wkday_tou_periods.append(wkday_tou_period)\n \n elif len(wkday_tou_period) == 25:\n wkday_tou_periods.append(wkday_tou_period[1:])\n\n for monthly_wknd_tou in eligible_rates['energyweekendschedule'].iloc[i].replace('L','').replace('[', '').split(']')[:-2]:\n wknd_tou_period = monthly_wknd_tou.replace(' ','').split(',')\n if len(wknd_tou_period) == 24:\n wknd_tou_periods.append(wknd_tou_period)\n \n elif len(wknd_tou_period) == 25:\n wknd_tou_periods.append(wknd_tou_period[1:]) \n \n periods = []\n for month, dow, hr, energy_kwh in zip(hourly_energy_df['month'], \n hourly_energy_df['weekday'], \n hourly_energy_df['hour'],\n hourly_energy_df['energy_kwh']):\n month_idx = month - 1\n\n if dow < 5:\n periods.append(wkday_tou_periods[month_idx][hr])\n \n elif dow in [5,6]:\n periods.append(wknd_tou_periods[month_idx][hr])\n \n annual_energy_cost = 0\n prev_month = 1 #init prev month var\n month_energy = 0 #init monthly energy tracking\n for month, period, energy_kwh in zip(hourly_energy_df['month'], periods, hourly_energy_df['energy_kwh']):\n period = int(period)\n \n #update monthly energy\n if month == prev_month:\n month_energy += float(energy_kwh)\n \n else:\n month_energy = energy_kwh\n \n prev_month = month\n\n if period == 0:\n for tier in range(11):\n maxim = 'energyratestructure/period{0}/tier{1}max'.format(period, tier)\n rate = 'energyratestructure/period{0}/tier{1}rate'.format(period, tier)\n adj = 'energyratestructure/period{0}/tier{1}adj'.format(period, tier)\n energy_cost = eligible_rates.iloc[i][rate] + eligible_rates.iloc[i][adj]\n hourly_energy_cost = energy_kwh * energy_cost\n tier_max = float(eligible_rates.iloc[i][maxim])\n\n if month_energy <= tier_max:\n annual_energy_cost += hourly_energy_cost\n break\n else:\n continue \n \n elif period == 1:\n for tier in range(8):\n maxim = 'energyratestructure/period{0}/tier{1}max'.format(period, tier)\n rate = 'energyratestructure/period{0}/tier{1}rate'.format(period, tier)\n adj = 'energyratestructure/period{0}/tier{1}adj'.format(period, tier)\n energy_cost = eligible_rates.iloc[i][rate] + eligible_rates.iloc[i][adj]\n hourly_energy_cost = energy_kwh * energy_cost\n tier_max = float(eligible_rates.iloc[i][maxim])\n if month_energy <= tier_max:\n annual_energy_cost += hourly_energy_cost\n break\n else:\n continue \n \n elif period in range(2,6):\n for tier in range(5):\n maxim = 'energyratestructure/period{0}/tier{1}max'.format(period, tier)\n rate = 'energyratestructure/period{0}/tier{1}rate'.format(period, tier)\n adj = 'energyratestructure/period{0}/tier{1}adj'.format(period, tier)\n energy_cost = eligible_rates.iloc[i][rate] + eligible_rates.iloc[i][adj]\n 
hourly_energy_cost = energy_kwh * energy_cost\n tier_max = float(eligible_rates.iloc[i][maxim])\n if month_energy <= tier_max:\n annual_energy_cost += hourly_energy_cost\n break\n else:\n continue \n \n else:\n maxim = 'energyratestructure/period{0}/tier0max'.format(period)\n rate = 'energyratestructure/period{0}/tier0rate'.format(period)\n adj = 'energyratestructure/period{0}/tier0adj'.format(period)\n energy_cost = eligible_rates.iloc[i][rate] + eligible_rates.iloc[i][adj]\n hourly_energy_cost = energy_kwh * energy_cost\n annual_energy_cost += hourly_energy_cost\n\n annual_energy_costs.append(annual_energy_cost)\n\n eligible_rates['annual_energy_cost'] = annual_energy_costs\n eligible_rates = eligible_rates[eligible_rates.annual_energy_cost>=0] #remove negative energy costs\n logging.info(\"{} - Annual energy cost calculations complete.\".format(p))\n\n eligible_rates['annual_cost_total'] = eligible_rates['annual_fixed_cost'] + eligible_rates['annual_demand_cost'] + eligible_rates['annual_energy_cost']\n new_field = '{}_lvl_cost_per_kwh'.format(p)\n eligible_rates[new_field] = eligible_rates['annual_cost_total']/annual_energy_kwh\n\n eligible_rates.to_csv(outpath+'dcfc_rates_{}.csv'.format(p), index=False)"
] |
[
[
"numpy.array",
"pandas.concat",
"pandas.read_csv",
"pandas.DataFrame"
]
] |
kamikaze0923/jtvae
|
[
"0292265bb97c31a925a03b18a3d3b38c560c89c6"
] |
[
"molvae/sample.py"
] |
[
"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.optim.lr_scheduler as lr_scheduler\nfrom torch.autograd import Variable\n\nimport math, random, sys\nfrom optparse import OptionParser\nfrom collections import deque\nimport rdkit\nimport rdkit.Chem as Chem\nfrom rdkit.Chem import Draw\n\nfrom jtnn import *\n\nlg = rdkit.RDLogger.logger() \nlg.setLevel(rdkit.RDLogger.CRITICAL)\n\nparser = OptionParser()\nparser.add_option(\"-n\", \"--nsample\", dest=\"nsample\")\nparser.add_option(\"-v\", \"--vocab\", dest=\"vocab_path\")\nparser.add_option(\"-m\", \"--model\", dest=\"model_path\")\nparser.add_option(\"-w\", \"--hidden\", dest=\"hidden_size\", default=200)\nparser.add_option(\"-l\", \"--latent\", dest=\"latent_size\", default=56)\nparser.add_option(\"-d\", \"--depth\", dest=\"depth\", default=3)\nparser.add_option(\"-e\", \"--stereo\", dest=\"stereo\", default=1)\nopts,args = parser.parse_args()\n \nvocab = [x.strip(\"\\r\\n \") for x in open(opts.vocab_path)] \nvocab = Vocab(vocab)\n\nhidden_size = int(opts.hidden_size)\nlatent_size = int(opts.latent_size)\ndepth = int(opts.depth)\nnsample = int(opts.nsample)\nstereo = True if int(opts.stereo) == 1 else False\n\nmodel = JTNNVAE(vocab, hidden_size, latent_size, depth, stereo=stereo)\nload_dict = torch.load(opts.model_path)\nmissing = {k: v for k, v in list(model.state_dict().items()) if k not in load_dict}\nload_dict.update(missing) \nmodel.load_state_dict(load_dict)\nmodel = model.cuda()\n\ntorch.manual_seed(0)\nfor i in range(nsample):\n print(model.sample_prior(prob_decode=False))\n"
] |
[
[
"torch.manual_seed",
"torch.load"
]
] |
mspectorgoogle/clusterfuzz
|
[
"44df69cbcb94efc212f27758d45d6ff0f36061e5"
] |
[
"src/clusterfuzz/_internal/bot/fuzzers/ml/rnn/train.py"
] |
[
"# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Train ml rnn model.\"\"\"\n\nimport argparse\nimport math\nimport os\nimport sys\nimport time\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom clusterfuzz._internal.bot.fuzzers.ml.rnn import constants\nfrom clusterfuzz._internal.bot.fuzzers.ml.rnn import utils\n\n# Training suggestions\n#\n# Training only:\n# Leave all the parameters as they are in constants.py.\n# Disable validation to run a bit faster (set validation=False).\n# You can follow progress in Tensorboard: tensorboard --logdir=log\n#\n# Training and experimenting (default):\n# Keep validation enabled.\n# You can now play with the parameters and follow the effects in\n# Tensorboard.\n# A good choice of parameters ensures that the testing and validation\n# curves stay close. To see the curves drift apart (\"overfitting\") try\n# to use an insufficient amount of training data.\n\n\[email protected]\ndef train_step(model, optimizer, input_data, expected_data, train=False):\n \"\"\"Train the model for one step.\n\n Args:\n model: RNN model to train/predict.\n optimize: optimizer to use to train the model.\n input_data: input sequence to the model.\n expected_data: expected output of the model.\n\n Returns:\n Tuple containing the sequential loss between the expected output and the\n real output, the batch loss between the two, the accuracy metric value as\n well as the most likely predicted output.\n \"\"\"\n with tf.GradientTape() as tape:\n predicted_data = model(input_data)\n loss = tf.keras.losses.sparse_categorical_crossentropy(\n expected_data, predicted_data, from_logits=True)\n seq_loss = tf.reduce_mean(input_tensor=loss, axis=1)\n batch_loss = tf.reduce_mean(input_tensor=seq_loss)\n\n output_bytes = tf.cast(\n tf.argmax(predicted_data, axis=-1), expected_data.dtype)\n accuracy = tf.reduce_mean(\n tf.cast(tf.equal(expected_data, output_bytes), tf.float32))\n\n if train:\n grads = tape.gradient(loss, model.trainable_variables)\n optimizer.apply_gradients(zip(grads, model.trainable_variables))\n\n return seq_loss, batch_loss, accuracy, output_bytes\n\n\ndef main(args):\n \"\"\"Main function to train the model.\n\n Args:\n args: Parsed arguments.\n\n Returns:\n Execution status defined by `constants.ExitCode`.\n \"\"\"\n # Validate paths.\n if not validate_paths(args):\n return constants.ExitCode.INVALID_PATH\n\n # Extract paths.\n input_dir = args.input_dir\n model_dir = args.model_dir\n log_dir = args.log_dir\n existing_model = args.existing_model\n\n # Extract model parameters.\n batch_size = args.batch_size\n dropout_pkeep = args.dropout_pkeep\n hidden_state_size = args.hidden_state_size\n hidden_layer_size = args.hidden_layer_size\n learning_rate = args.learning_rate\n\n # Extract additional flags.\n debug = args.debug\n validation = args.validation\n\n # Split corpus for training and validation.\n # validation_text will be empty if validation is False.\n code_text, validation_text, input_ranges = utils.read_data_files(\n input_dir, 
validation=validation)\n\n # Bail out if we don't have enough corpus for training.\n if len(code_text) < batch_size * constants.TRAINING_SEQLEN + 1:\n return constants.ExitCode.CORPUS_TOO_SMALL\n\n # Get corpus files info. Will be used in debug mode to generate sample text.\n files_info_list = []\n if debug:\n files_info_list = utils.get_files_info(input_dir)\n assert files_info_list\n\n # Calculate validation batch size. It will be 0 if we choose not to validate.\n validation_batch_size = len(validation_text) // constants.VALIDATION_SEQLEN\n\n # Display some stats on the data.\n epoch_size = len(code_text) // (batch_size * constants.TRAINING_SEQLEN)\n utils.print_data_stats(len(code_text), len(validation_text), epoch_size)\n\n # Set global random seed, so any random sequence generated is repeatable.\n # It could also be removed.\n tf.random.set_seed(0)\n\n # Build the RNN model.\n model = utils.build_model(hidden_layer_size * hidden_state_size,\n dropout_pkeep, batch_size, debug)\n\n # Choose Adam optimizer to compute gradients.\n optimizer = tf.keras.optimizers.Adam(learning_rate)\n\n # Init Tensorboard stuff.\n # This will save Tensorboard information in folder specified in command line.\n # Two sets of data are saved so that you can compare training and\n # validation curves visually in Tensorboard.\n timestamp = str(math.trunc(time.time()))\n summary_writer = tf.summary.create_file_writer(\n os.path.join(log_dir, timestamp + '-training'))\n validation_writer = tf.summary.create_file_writer(\n os.path.join(log_dir, timestamp + '-validation'))\n\n # For display: init the progress bar.\n step_size = batch_size * constants.TRAINING_SEQLEN\n frequency = constants.DISPLAY_FREQ * step_size\n progress = utils.Progress(\n constants.DISPLAY_FREQ,\n size=constants.DISPLAY_LEN,\n msg='Training on next {} batches'.format(constants.DISPLAY_FREQ))\n\n # We continue training on existing model, or start with a new model.\n if existing_model:\n print('Continue training on existing model: {}'.format(existing_model))\n try:\n model.load_weights(existing_model)\n except:\n print(\n ('Failed to restore existing model since model '\n 'parameters do not match.'),\n file=sys.stderr)\n return constants.ExitCode.TENSORFLOW_ERROR\n else:\n print('No existing model provided. 
Start training with a new model.')\n\n # Num of bytes we have trained so far.\n steps = 0\n\n # Training loop.\n for input_batch, expected_batch, epoch in utils.rnn_minibatch_sequencer(\n code_text,\n batch_size,\n constants.TRAINING_SEQLEN,\n nb_epochs=constants.EPOCHS):\n\n # Train on one mini-batch.\n seq_loss, batch_loss, accuracy, output_bytes = train_step(\n model, optimizer, input_batch, expected_batch, train=True)\n\n # Log training data for Tensorboard display a mini-batch of sequences\n # every `frequency` batches.\n if debug and steps % frequency == 0:\n utils.print_learning_learned_comparison(\n input_batch, output_bytes, seq_loss, input_ranges, batch_loss,\n accuracy, epoch_size, steps, epoch)\n with summary_writer.as_default(): # pylint: disable=not-context-manager\n tf.summary.scalar('batch_loss', batch_loss, step=steps)\n tf.summary.scalar('batch_accuracy', accuracy, step=steps)\n summary_writer.flush()\n\n # Run a validation step every `frequency` batches.\n # The validation text should be a single sequence but that's too slow.\n # We cut it up and batch the pieces (slightly inaccurate).\n if validation and steps % frequency == 0 and validation_batch_size:\n utils.print_validation_header(len(code_text), input_ranges)\n validation_x, validation_y, _ = next(\n utils.rnn_minibatch_sequencer(validation_text, validation_batch_size,\n constants.VALIDATION_SEQLEN, 1))\n\n validation_model = utils.build_model(\n hidden_layer_size * hidden_state_size, dropout_pkeep,\n validation_batch_size, False)\n last_weights = tf.train.latest_checkpoint(model_dir)\n if last_weights:\n validation_model.load_weights(tf.train.latest_checkpoint(model_dir))\n validation_model.build(tf.TensorShape([validation_batch_size, None]))\n validation_model.reset_states()\n\n # Run one single inference step\n _, batch_loss, accuracy, _ = train_step(\n validation_model, optimizer, validation_x, validation_y, train=False)\n\n utils.print_validation_stats(batch_loss, accuracy)\n\n # Save validation data for Tensorboard.\n with validation_writer.as_default(): # pylint: disable=not-context-manager\n tf.summary.scalar('batch_loss', batch_loss, step=steps)\n tf.summary.scalar('batch_accuracy', accuracy, step=steps)\n validation_writer.flush()\n\n # Display a short text generated with the current weights and biases.\n # If enabled, there will be a large output.\n if debug and steps // 4 % frequency == 0:\n utils.print_text_generation_header()\n file_info = utils.random_element_from_list(files_info_list)\n first_byte, file_size = file_info['first_byte'], file_info['file_size']\n ry = np.array([[first_byte]])\n sample = [first_byte]\n\n generation_model = utils.build_model(\n hidden_layer_size * hidden_state_size, dropout_pkeep, 1, False)\n last_weights = tf.train.latest_checkpoint(model_dir)\n if last_weights:\n generation_model.load_weights(tf.train.latest_checkpoint(model_dir))\n generation_model.build(tf.TensorShape([1, None]))\n generation_model.reset_states()\n\n for _ in range(file_size - 1):\n prediction = generation_model(ry)\n prediction = tf.squeeze(prediction, 0).numpy()\n rc = utils.sample_from_probabilities(\n prediction, topn=10 if epoch <= 1 else 2)\n sample.append(rc)\n ry = np.array([[rc]])\n\n print(repr(utils.decode_to_text(sample)))\n utils.print_text_generation_footer()\n\n # Save a checkpoint every `10 * frequency` batches. 
Each checkpoint is\n # a version of model.\n if steps // 10 % frequency == 0:\n saved_model_name = constants.RNN_MODEL_NAME + '_' + timestamp\n saved_model_path = os.path.join(model_dir, saved_model_name)\n model.save_weights(saved_model_path)\n print('Saved model: {}'.format(saved_model_path))\n\n # Display progress bar.\n if debug:\n progress.step(reset=steps % frequency == 0)\n\n # Update state.\n steps += step_size\n\n # Save the model after training is done.\n saved_model_name = constants.RNN_MODEL_NAME + '_' + timestamp\n saved_model_path = os.path.join(model_dir, saved_model_name)\n model.save_weights(saved_model_path)\n print('Saved model: {}'.format(saved_model_path))\n\n return constants.ExitCode.SUCCESS\n\n\ndef validate_paths(args):\n \"\"\"Validate paths.\n\n Args:\n args: Parsed arguments.\n\n Returns:\n True if all paths are valid, False otherwise.\n \"\"\"\n if not os.path.exists(args.input_dir):\n print(\n 'Input directory {} does not exist'.format(args.input_dir),\n file=sys.stderr)\n return False\n\n if not os.path.exists(args.model_dir):\n os.mkdir(args.model_dir)\n\n if not os.path.exists(args.log_dir):\n os.mkdir(args.log_dir)\n\n if args.existing_model and not utils.validate_model_path(args.existing_model):\n print(\n 'Existing model {} does not exist'.format(args.existing_model),\n file=sys.stderr)\n return False\n\n return True\n\n\ndef parse_args():\n \"\"\"Parse command line arguments.\n\n Returns:\n Parsed arguement object.\n \"\"\"\n parser = argparse.ArgumentParser('Training RNN model on existing testcases')\n\n parser.add_argument('--input-dir', help='Input folder path', required=True)\n parser.add_argument('--log-dir', help='Log folder path', required=True)\n parser.add_argument('--model-dir', help='Path to save models', required=True)\n\n # Optional arguments: model parameters and additional flags.\n parser.add_argument(\n '--batch-size', help='Batch size', type=int, default=constants.BATCH_SIZE)\n parser.add_argument(\n '--debug', help='Print training progress', action='store_true')\n parser.add_argument(\n '--dropout-pkeep',\n help='Dropout probability (keep rate)',\n type=float,\n default=constants.DROPOUT_PKEEP)\n parser.add_argument(\n '--existing-model', help='Continue training on existing model')\n parser.add_argument(\n '--hidden-state-size',\n help='Hidden state size of LSTM cell',\n type=int,\n default=constants.HIDDEN_STATE_SIZE)\n parser.add_argument(\n '--hidden-layer-size',\n help='Hidden layer size of LSTM model',\n type=int,\n default=constants.HIDDEN_LAYER_SIZE)\n parser.add_argument(\n '--learning-rate',\n help='Learning rate',\n type=float,\n default=constants.LEARNING_RATE)\n parser.add_argument(\n '--validation',\n help='Print validation stats during training',\n action='store_true')\n\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n parsed_args = parse_args()\n sys.exit(main(parsed_args))\n"
] |
[
[
"tensorflow.TensorShape",
"tensorflow.train.latest_checkpoint",
"tensorflow.reduce_mean",
"tensorflow.keras.losses.sparse_categorical_crossentropy",
"tensorflow.equal",
"tensorflow.squeeze",
"tensorflow.keras.optimizers.Adam",
"tensorflow.argmax",
"numpy.array",
"tensorflow.summary.scalar",
"tensorflow.random.set_seed",
"tensorflow.GradientTape"
]
] |
TokisakiKurumi2001/transformer-based
|
[
"36553721e167406fb98b118750d5fc1f21ea29e9"
] |
[
"Process.py"
] |
[
"import pandas as pd\nimport torchtext\nfrom torchtext.legacy import data\nfrom Tokenize import tokenize\nfrom Batch import MyIterator, batch_size_fn\nimport os\nimport dill as pickle\n\ndef read_data(opt):\n \n if opt.src_data is not None:\n try:\n opt.src_data = open(opt.src_data).read().strip().split('\\n')\n except:\n print(\"error: '\" + opt.src_data + \"' file not found\")\n quit()\n \n if opt.trg_data is not None:\n try:\n opt.trg_data = open(opt.trg_data).read().strip().split('\\n')\n except:\n print(\"error: '\" + opt.trg_data + \"' file not found\")\n quit()\n\ndef create_fields(opt):\n \n spacy_langs = ['en', 'vi', 'fr', 'de', 'es', 'pt', 'it', 'nl']\n if opt.src_lang not in spacy_langs:\n print('invalid src language: ' + opt.src_lang + 'supported languages : ' + spacy_langs) \n if opt.trg_lang not in spacy_langs:\n print('invalid trg language: ' + opt.trg_lang + 'supported languages : ' + spacy_langs)\n \n print(\"loading spacy tokenizers...\")\n \n t_src = tokenize(opt.src_lang)\n t_trg = tokenize(opt.trg_lang)\n\n TRG = data.Field(lower=True, tokenize=t_trg.tokenizer, init_token='<sos>', eos_token='<eos>')\n SRC = data.Field(lower=True, tokenize=t_src.tokenizer)\n\n if opt.load_weights is not None:\n try:\n print(\"loading presaved fields...\")\n print('opt.load_weights', opt.load_weights)\n SRC = pickle.load(open(f'{opt.load_weights}/SRC.pkl', 'rb'))\n TRG = pickle.load(open(f'{opt.load_weights}/TRG.pkl', 'rb'))\n except:\n print(\"error opening SRC.pkl and TRG.pkl field files, please ensure they are in \" + opt.load_weights + \"/\")\n quit()\n \n return(SRC, TRG)\n\ndef create_dataset(opt, SRC, TRG):\n\n print(\"creating dataset and iterator... \")\n\n raw_data = {'src' : [line for line in opt.src_data], 'trg': [line for line in opt.trg_data]}\n df = pd.DataFrame(raw_data, columns=[\"src\", \"trg\"])\n \n mask = (df['src'].str.count(' ') < opt.max_strlen) & (df['trg'].str.count(' ') < opt.max_strlen)\n df = df.loc[mask]\n\n df.to_csv(\"translate_transformer_temp.csv\", index=False)\n \n data_fields = [('src', SRC), ('trg', TRG)]\n train = data.TabularDataset('./translate_transformer_temp.csv', format='csv', fields=data_fields)\n\n train_iter = MyIterator(train, batch_size=opt.batchsize, device='cuda', #opt.device\n repeat=False, sort_key=lambda x: (len(x.src), len(x.trg)),\n batch_size_fn=batch_size_fn, train=True, shuffle=True)\n \n # os.remove('translate_transformer_temp.csv')\n\n if opt.load_weights is None:\n SRC.build_vocab(train)\n TRG.build_vocab(train)\n if opt.checkpoint > 0:\n try:\n os.mkdir(\"weights\")\n except:\n print(\"weights folder already exists, run program with -load_weights weights to load them\")\n quit()\n pickle.dump(SRC, open('weights/SRC.pkl', 'wb'))\n pickle.dump(TRG, open('weights/TRG.pkl', 'wb'))\n\n opt.src_pad = SRC.vocab.stoi['<pad>']\n opt.trg_pad = TRG.vocab.stoi['<pad>']\n\n opt.train_len = get_len(train_iter)\n \n return train_iter\n\ndef get_len(train):\n\n for i, b in enumerate(train):\n pass\n \n return i\n"
] |
[
[
"pandas.DataFrame"
]
] |
damien-dumont/Image-Encryption
|
[
"490a62fc27dfde159962f32e82d174345e088176"
] |
[
"Rough code/Encryption code/V0.9.py"
] |
[
"import datetime\nimport rsa\nfrom math import sqrt\nimport numpy as np\nfrom PIL import Image\nimport random\nimport tkinter as tk\nimport tkinter as tk\nfrom tkinter.filedialog import askopenfilename, asksaveasfilename\n\nroot = tk.Tk()\nroot.title(\"Encryptor/Decryptor tool V1.0\")\n\nroot.rowconfigure(0, weight=1)\nroot.columnconfigure(0, minsize=100, weight=1)\n\ntxt_read = tk.Text(root, relief=tk.GROOVE, bd=1, width=50)\nleft_grid = tk.Frame(root)\nright_grid = tk.Frame(root)\n\n\n\n\ndef keygen():\n (publicKey, privateKey) = rsa.newkeys(2048) # Generate 2048 bits keys\n publicKeyPEM = publicKey.save_pkcs1().decode()\n privateKeyPEM = privateKey.save_pkcs1().decode()\n\n \"\"\"Save the current file as a new file.\"\"\"\n filepath = asksaveasfilename(\n defaultextension=\"txt\",\n filetypes=[(\"Text Files\", \"*.txt\"), (\"All Files\", \"*.*\")],\n )\n if not filepath:\n return\n with open(filepath, \"w\") as output_file:\n text = publicKeyPEM\n output_file.write(text)\n root.title(f\"Simple Text Editor - {filepath}\")\n\n\n\n filepath = asksaveasfilename(\n defaultextension=\"txt\",\n filetypes=[(\"Text Files\", \"*.txt\"), (\"All Files\", \"*.*\")],\n )\n if not filepath:\n return\n with open(filepath, \"w\") as output_file:\n text = privateKeyPEM\n output_file.write(text)\n root.title(f\"Simple Text Editor - {filepath}\")\n# with open(\"private.txt\", \"w\") as f: # To give the option as to where to save the files\n# f.write(privateKeyPEM)\n\n\n# with open(\"public.txt\", \"w\") as f:\n# f.write(publicKeyPEM)\n\n\n\n\n\n\ndef cypher():\n string = name.get()\n\n ############ 2.a\n now = datetime.datetime.now() # Add the date at the beginning of the message\n\n dt =[str(now.year), str(now.month), str(now.day), str(now.hour), str(now.minute)]\n for i in range(3): # Making sure that when the hour, day, minute... is a single digit, it is written as 2 digits starting with 0. 
2021/11/9 8:4 becomes 2021/11/09 08:04\n if len(dt[i+1]) == 2:\n pass\n else:\n dt[i+1] = \"0\" + dt[i+1] \n\n date = \"[\" + dt[0] + \"/\" + dt[1] + \"/\" + dt[2] + \" \" + dt[3] + \":\" + dt[4] + \"] > \"\n string = date + string # Adding the date at the beginning of the message\n ############ 3\n with open(\"public.txt\", \"r\") as f:\n publicKeyPEM = f.read()\n publicKey = rsa.PublicKey.load_pkcs1(publicKeyPEM.encode('utf8')) # Encryption -> limited to ~220 characters because of the date and ID -> to test\n encMessage = rsa.encrypt(string.encode(),publicKey)\n ############ 3.a\n ID_w = \" \" # ID must be multiple of 3, see below, maximum size is 42 -> to test\n\n if len(ID_w)%3 != 0:\n loop = 3-(len(ID_w)%3)\n for i in range(loop):\n ID_w += \" \"\n\n ID_ord=[0]*len(ID_w)\n for i in range(len(ID_w)):\n ID_ord[i]=ord(ID_w[i]) # Id is converted from string to a list of numbers\n\n ########### 4\n Msg = list(encMessage)\n Msg = ID_ord + Msg # Inputting the ID at the start\n last_byte = int()\n\n if len(Msg)%3 != 0: # Msg size must be multiple of 3, because 3 ints = 1 pixel\n loops = 3-(len(Msg)%3)\n for i in range(loops):\n a = random.randint(0,255)\n Msg += [a]\n last_byte += loops # count the number of random numbers added, see below\n ########## 5\n size = len(Msg)/3\n\n if int(sqrt(size)) == sqrt(size):\n pass\n else:\n Missing = pow(int(sqrt(size)) + 1, 2) - size # Adding random numbers until the number of pixels is a square number\n for i in range((int(Missing)*3)-1):\n a = random.randint(0,255)\n Msg += [a]\n last_byte += (int(Missing)*3) # the last digit will be read later, to now how many digits we have to remove to get the (encrypted) message back\n Msg += [last_byte]\n ############# 6\n\n chunks, chunk_size = len(Msg), 3\n split = [ Msg[i:i+chunk_size] for i in range(0, chunks, chunk_size) ] # Message is split in chunks of 3 digits, 3 digits = one pixel (R,G,B)\n side = int(sqrt(len(Msg)/3)) # Getting the size of the matrix, to make a square matrix, and as such a square image\n w, h = side, side\n matrix = np.zeros((h, w, 3), dtype=np.uint8)\n for i in range(1,side+1):\n for j in range(1,side+1):\n a = (j-1) + (i-1)*10\n for h in range(1,4):\n matrix[i-1][j-1][h-1] = split[a][h-1] # Place the numbers from the \"split\" list into a matrix suitable for fromarray function\n\n img = Image.fromarray(matrix, 'RGB')\n img.save('Output_image2.png') # Image saved, make it a choice tho\n\n\n\n\n\nname = tk.StringVar()\ntxt_write = tk.Entry(root, relief=tk.GROOVE, bd=1, width=60, textvariable=name)\n\n\n\n\ndef decypher():\n img = Image.open(\"Output_image2.png\") #to give the choice as to where to open it\n imgArray = np.array(img)\n\n b = imgArray.flatten()\n b = list(b)\n ID_R = \" \" # to input from entry\n\n if len(ID_R)%3 != 0:\n loop = 3-(len(ID_R)%3)\n for i in range(loop):\n ID_R += \" \"\n ID_compare = str()\n for i in range(int(len(ID_R))):\n ID_compare += chr(b[i])\n\n if ID_compare == ID_R:\n WOID_L = int(len(ID_R))\n WOID = b[WOID_L:] # Removing ID, as it is not encrypted\n\n n_removed = int(WOID[-1]) # Reads the number of characters to remove at the end\n for i in range(n_removed):\n WOID = WOID[0:-1]\n\n WOID_b= bytes(WOID)\n with open(\"private.txt\", \"r\") as f:\n privateKeyPEM = f.read()\n\n privateKey = rsa.PrivateKey.load_pkcs1(privateKeyPEM.encode('utf8'))\n decMessage = rsa.decrypt(WOID_b, privateKey).decode()\n txt_read.delete(1.0, tk.END)\n txt_read.insert(tk.END, decMessage)\n else:\n txt_read.insert(tk.END, \"Incompatible ID\")\n\n\nleft_entry_grid = 
tk.Frame(root)\nright_entry_grid = tk.Frame(root)\n\n\nbtn_keygen = tk.Button(left_grid, text=\"Generate keys\", width=30, command=lambda:keygen())\nbtn_get_image = tk.Button(right_grid, text = \"Load encrypted image\", width=30)\nbtn_key_public = tk.Button(left_grid, text=\"Load public keys file\", width=30)\nbtn_key_private = tk.Button(right_grid, text=\"Load private keys file\", width=30)\nbtn_key_cypher = tk.Button(left_grid, text = \"Encrypt message\", width=30, command=lambda:cypher())\nbtn_key_decypher = tk.Button(right_grid, text = \"Decrypt message\", width=30, command=lambda:decypher())\nwrite_label = tk.Label(left_grid, text=\"Write your message here\", width=30, bg=\"black\", fg=\"white\")\nread_label = tk.Label(right_grid, text=\"Your message will be displayed here\", width=30, bg=\"black\", fg=\"white\")\nwrite_code = tk.Entry(left_entry_grid)\nwrite_code_label = tk.Label(left_entry_grid, text=\"ID Code:\")\nread_code = tk.Entry(right_entry_grid)\nread_code_label = tk.Label(right_entry_grid, text=\"ID Code:\")\n\nbtn_keygen.grid(row=0, column=0, sticky=\"ns\")\nbtn_key_public.grid(row=1, column=0, sticky=\"ns\")\nbtn_get_image.grid(row=0, column=0)\nbtn_key_private.grid(row=1, column=0, sticky=\"ns\")\n\nleft_grid.grid(row = 0, column=0, sticky=\"ns\")\nright_grid.grid(row = 0, column=1, sticky=\"ns\")\n\nleft_entry_grid.grid(row = 1, column = 0, sticky=\"ns\")\nright_entry_grid.grid(row = 1, column = 1, sticky=\"ns\")\n\n\nwrite_code.grid(row = 0,column=1)\nread_code.grid(row = 0,column=1)\nwrite_code_label.grid(row = 0,column=0)\nread_code_label.grid(row = 0,column=0)\n\ntxt_write.grid(row = 2,column=0, sticky=\"nsew\", padx=5, pady=5)\ntxt_read.grid(row=2,column=1, sticky=\"nsew\", padx=5, pady=5)\n\nbtn_key_cypher.grid(row=2, column=0, sticky=\"ns\")\nbtn_key_decypher.grid(row=2, column=0, sticky=\"ns\")\nwrite_label.grid(row=3, sticky=\"ns\")\nread_label.grid(row=3, sticky=\"ns\")\n\nroot.mainloop()"
] |
[
[
"numpy.array",
"numpy.zeros"
]
] |
Magdyedwar1996/python-level-one-codes
|
[
"066086672f43488bc8b32c620b5e2f94cedfe3da"
] |
[
"Numpy/Anather_Functions.py"
] |
[
"import numpy as np \n\n# We create a 2 x 2 ndarray\nX = np.array([[1,2], [3,4]])\n\n# We print x\nprint()\nprint('X = \\n', X)\nprint()\n\nprint('3 * X = \\n', 3 * X)\nprint()\nprint('3 + X = \\n', 3 + X)\nprint()\nprint('X - 3 = \\n', X - 3)\nprint()\nprint('X / 3 = \\n', X / 3)"
] |
[
[
"numpy.array"
]
] |
telesoho/pylab
|
[
"327d828ea18b419bad3d4b96b1625c700c56e55f"
] |
[
"waveform.py"
] |
[
"#coding:utf-8\n\"\"\"\nfilename: mp3-to-waveform.py\n\"\"\"\nfrom pydub import AudioSegment\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef main():\n \"\"\"\n 读取mp3片段,并显示波形\n \"\"\"\n song = AudioSegment.from_mp3('data/song.mp3')\n print(\n '样本宽度:', song.sample_width,\n '声道数:', song.channels,\n '采样率:', song.frame_rate,\n '帧宽:', song.frame_width,\n 'song.array_type', song.array_type\n )\n\n wav = song.get_array_of_samples() # 获取声音数据数组\n wav = np.frombuffer(wav, dtype=song.array_type) / 32768.0 # 正则化为(-1, 1)\n\n print(wav[1:10])\n\n full_time = np.arange(0.0, len(wav)/song.frame_rate, 1/song.frame_rate) #生成时间坐标\n\n # 取出中心部分波形\n center = len(wav) / 2 # 中心样本编号\n cuttime = 0.02 # 声音切片长度 [s]\n start = int(center - cuttime/2*song.frame_rate)\n end = int(center + cuttime/2*song.frame_rate)\n\n wav_fft = np.fft.fft(wav[start: end]) # 高速傅立叶变换\n freq_list = np.fft.fftfreq(end - start, d=1.0/song.frame_rate) # 计算出频率\n\n amplitude_spectrum = [np.sqrt(c.real ** 2 + c.imag ** 2) for c in wav_fft] # 计算振幅\n phase_spectrum = [np.arctan2(int(c.imag), int(c.real)) for c in wav_fft] # 计算相位\n\n # 画出波形\n plt.subplot(311) # 3行1列\n plt.plot(full_time[start: end]*1000, wav[start : end])\n plt.xlabel(\"time [ms]\")\n plt.ylabel(\"amplitude\")\n plt.plot()\n\n plt.subplot(312)\n plt.plot(freq_list, amplitude_spectrum, marker= '.', linestyle='-')\n plt.axis([0, song.frame_rate/2, 0, 50])\n plt.xlabel(\"frequency [Hz]\")\n plt.ylabel(\"amplitude spectrum\")\n\n # 位相描画\n plt.subplot(313)\n plt.plot(freq_list, phase_spectrum, marker='.', linestyle='-')\n plt.axis([0, song.frame_rate/2, -np.pi, np.pi])\n plt.xlabel(\"frequency [Hz]\")\n plt.ylabel(\"phase spectrum\")\n\n plt.show()\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"numpy.sqrt",
"numpy.fft.fft",
"matplotlib.pyplot.plot",
"numpy.frombuffer",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.axis",
"numpy.fft.fftfreq",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
ktw361/Local-Mid-Propagation
|
[
"0a99e82cccf8c35bc5f6989af2702203def4c7a4"
] |
[
"mmdet/models/roi_extractors/rfcn_single_level.py"
] |
[
"from __future__ import division\n\nimport torch.nn as nn\nfrom mmcv.cnn import normal_init\n\nfrom mmdet.ops import PSRoIPool\nfrom mmdet.core import force_fp32\nfrom ..registry import ROI_EXTRACTORS\n\n\n@ROI_EXTRACTORS.register_module\nclass RfcnPSRoIExtractor(nn.Module):\n \"\"\"Extract RoI features from a single level feature map.\n\n If there are mulitple input feature levels, each RoI is mapped to a level\n according to its scale.\n\n Args:\n finest_scale (int): Scale threshold of mapping to level 0.\n \"\"\"\n\n def __init__(self,\n featmap_stride,\n num_classes,\n reg_class_agnostic=False,\n pooled_size=7,\n ):\n super(RfcnPSRoIExtractor, self).__init__()\n num_reg_cls = 4 if reg_class_agnostic else 4 * num_classes\n self.cls_psroi_pool = PSRoIPool(\n pooled_size, pooled_size, 1.0 / featmap_stride, pooled_size, num_classes)\n self.loc_psroi_pool = PSRoIPool(\n pooled_size, pooled_size, 1.0 / featmap_stride, pooled_size, num_reg_cls)\n self.featmap_stride = featmap_stride\n self.fp16_enabled = False\n self.ps_cls_conv = nn.Conv2d(\n in_channels=512,\n out_channels=pooled_size*pooled_size*num_classes,\n kernel_size=1)\n self.ps_loc_conv = nn.Conv2d(\n in_channels=512,\n out_channels=pooled_size*pooled_size*num_reg_cls,\n kernel_size=1)\n self.featmap_strides = [self.featmap_stride] # For test mixins\n\n @property\n def num_inputs(self):\n \"\"\"int: Input feature map levels.\"\"\"\n return 1\n\n def init_weights(self):\n normal_init(self.ps_cls_conv, mean=0.0, std=0.01, bias=0.0)\n normal_init(self.ps_loc_conv, mean=0.0, std=0.01, bias=0.0)\n\n @force_fp32(apply_to=('feats', ), out_fp16=True)\n def forward(self, feats, rois):\n assert len(feats) == 1\n cls_feat = self.ps_cls_conv(feats[0])\n loc_feat = self.ps_loc_conv(feats[0])\n cls_roi_feat = self.cls_psroi_pool(cls_feat, rois)\n loc_roi_feat = self.loc_psroi_pool(loc_feat, rois)\n return cls_roi_feat, loc_roi_feat\n"
] |
[
[
"torch.nn.Conv2d"
]
] |
deanrp2/revarie
|
[
"cf9c9e25f11622ec019f0dbf9bfa700f0ae7f10b"
] |
[
"build/lib/test/test_fvariogram.py"
] |
[
"import unittest\nfrom revarie.fvariogram import *\nimport numpy as np\nfrom revarie.models import *\nimport matplotlib.pyplot as plt\n\nclass TestFVariogram(unittest.TestCase):\n def test_poly(self):\n \"\"\"\n Test accuracy of polyfit\n \"\"\"\n x = np.linspace(0,10,100)\n\n for n in range(2,8):\n y = x**n - 2*x**(n-1) + x**(n-2)\n\n fit = fvariogram(source = \"data\",\n method = \"poly\",\n options = [x, y, n])\n self.assertTrue(np.allclose(y, fit(x)))\n\n def test_lin_interp(self):\n \"\"\"\n Test accuracy of linear interpolation\n \"\"\"\n x = np.linspace(0,10,100)\n y = .04*x\n\n fit = fvariogram(source = \"data\",\n method = \"interp\",\n options = [x, y, \"linear\"])\n self.assertTrue(np.allclose(y, fit(x)))\n\n def test_near_interp(self):\n \"\"\"\n Test accuracy of nearest interploation\n \"\"\"\n x = np.linspace(0,10,100)\n def f(x):\n return x**2 + x * np.sin(x)\n y = f(x)\n\n fit = fvariogram(source = \"data\",\n method = \"interp\",\n options = [x,y,\"nearest\"])\n\n xnew = x[:-1] + np.random.uniform(0,1,99)*np.diff(x)\n xnearest = np.array([x[np.abs(x-a).argmin()] for a in xnew])\n self.assertTrue(np.allclose(f(xnearest), fit(xnew)))\n\n def test_bmodel(self):\n \"\"\"\n Test accuracy of built-in model fitting\n \"\"\"\n x = np.linspace(0,10,100)\n y = spherical(x, .2, 4, 6)\n\n fit = fvariogram(source = \"data\",\n method = \"bmodel\",\n options = [x,y,\"sph\"])\n\n self.assertTrue(np.allclose(y, fit(x)))\n\n def test_umodel(self):\n \"\"\"\n Test accuracy of user-specified model fitting\n \"\"\"\n def u(x, v, h):\n return v*np.sqrt(h*x)\n\n x = np.linspace(0,10,100)\n y = u(x, 1.4, 3.2)\n\n fit = fvariogram(source = \"data\",\n method = \"umodel\",\n options = [x,y,u])\n\n self.assertTrue(np.allclose(y, fit(x)))\n\n def test_ufuncs(self):\n \"\"\"\n Test accuracy of user-specified function specification\n \"\"\"\n def u(x):\n return np.sqrt(x)\n\n x = np.linspace(0,10,100)\n yu = u(x)\n\n ufit = fvariogram(source = \"func\",\n method = \"ufunc\",\n options = [u])\n self.assertTrue(np.allclose(yu, ufit(x)))\n\n def test_bfuncs(self):\n \"\"\"\n Test accuracy of built-in function specification\n \"\"\"\n x = np.linspace(0,10,100)\n yb = spherical(x,.2, 4, 6)\n\n bfit = fvariogram(source = \"func\",\n method = \"sph\",\n options = [.2, 4, 6])\n self.assertTrue(np.allclose(yb, bfit(x)))\n\n\n\n\n"
] |
[
[
"numpy.sqrt",
"numpy.linspace",
"numpy.abs",
"numpy.sin",
"numpy.diff",
"numpy.random.uniform"
]
] |
apoorvagnihotri/airpy
|
[
"9555dca74111b9730bff2cd537f51e77d9702685"
] |
[
"vayu/TheilSen.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 26 01:59:09 2019\n\n@author: Man Vinayaka\n\"\"\"\n\n\ndef TheilSen(df, pollutant):\n \"\"\" Plots a connected scatter plot of the average value of\n the pollutant every month of every year. Then plots a\n line of best fit through the plot showing the user\n the overall trend of the pollutant through the years.\n\t\t\n\t\tParameters\n\t\t----------\n\t\tdf: data frame\n\t\t\tminimally containing date and at least one other\n\t\t\tpollutant \n\t\tpollutant: type string\n\t\t\tA pollutant name correspoinding to \n\t\t\ta variable in a data frame, ex: 'pm25'\n \"\"\"\n import datetime as dt\n import matplotlib.pyplot as plt\n import matplotlib as mpl\n import numpy as np\n import pandas as pd\n from numpy import array\n import seaborn as sns\n import scipy\n from scipy import stats\n import math\n\n # =============================================================================\n # df = pd.read_csv(\"mydata.csv\")\n # =============================================================================\n df.index = pd.to_datetime(df.date)\n unique_years = np.unique(df.index.year)\n # df = df[pd.notnull(df[pollutant])]\n\n i = 0\n year = []\n while i < len(unique_years):\n year.append(str(unique_years[i]))\n i = i + 1\n num_unique_years = len(year)\n\n # df = df.drop(\"date\", axis=1)\n # print(df)\n i = 0\n x = 0\n j = 0\n var2 = []\n while i < num_unique_years:\n df_new = df[year[j]].resample(\"1D\").mean()\n df_new = df_new.fillna(method=\"ffill\")\n df_new[\"month\"] = df_new.index.month\n # df_new['day']=df_new.index.dayofweek\n # df_new['hour']=df_new.index.hour\n i = i + 1\n j = j + 1\n x = 0\n while x < 12:\n a = df_new[df_new.month == x]\n mean_var2 = a[pollutant].mean()\n var2.append(mean_var2)\n x = x + 1\n i = 0\n while i < len(var2):\n if pd.notnull(var2[i]) == False:\n var2[i] = (var2[i - 1] + var2[i + 1]) / 2\n i = i + 1\n\n scatterX = []\n t = 0\n while t < num_unique_years:\n r = 0\n while r < 12:\n scatterX.append(t + (r / 12))\n r = r + 1\n t = t + 1\n\n y = var2\n x = scatterX\n\n def best_fit(X, Y):\n\n xbar = sum(X) / len(X)\n ybar = sum(Y) / len(Y)\n n = len(X) # or len(Y)\n\n numer = sum([xi * yi for xi, yi in zip(X, Y)]) - n * xbar * ybar\n denum = sum([xi ** 2 for xi in X]) - n * xbar ** 2\n\n b = numer / denum\n a = ybar - b * xbar\n\n # print('best fit line:\\ny = {:.2f} + {:.2f}x'.format(a, b))\n\n return a, b\n\n a, b = best_fit(x, y)\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n # print(len(x))\n ax.plot(x, y, \"-o\")\n ax.set_xlabel(\"Year\")\n ax.set_ylabel(pollutant)\n ax.set_title(\"TheilSen plot\")\n plt.plot(np.unique(x), np.poly1d(np.polyfit(x, y, 1))(np.unique(x)), color=\"red\")\n plt.show()\n\n\n# =============================================================================\n# df = pd.read_csv(\"mydata.csv\")\n# TheilSen(df, 'o3')\n# =============================================================================\n"
] |
[
[
"numpy.polyfit",
"pandas.notnull",
"pandas.to_datetime",
"numpy.unique",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
ArseniiGav/Competition2-MLHEP21
|
[
"b6ae50b915def8f5b9367e073e64a4bb046b7fb0"
] |
[
"idao/data_module.py"
] |
[
"import pathlib as path\n\nimport pytorch_lightning as pl\nimport torch\nfrom torch.utils.data import DataLoader, random_split\nfrom torchvision import transforms\n\nfrom .dataloader import IDAODataset, img_loader, InferenceDataset\n\n\nclass IDAODataModule(pl.LightningDataModule):\n def __init__(self, data_dir: path.Path, batch_size: int, cfg):\n super().__init__()\n self.data_dir = data_dir\n self.batch_size = batch_size\n self.cfg = cfg\n\n def prepare_data(self):\n # called only on 1 GPU\n self.dataset = IDAODataset(\n root=self.data_dir.joinpath(\"train\"),\n loader=img_loader,\n transform=transforms.Compose(\n [transforms.ToTensor(), transforms.CenterCrop(120)]\n ),\n # TODO(kazeevn) use idiomatic torch\n target_transform=transforms.Compose(\n [\n lambda num: (\n torch.tensor([0, 1]) if num == 0 else torch.tensor([1, 0])\n )\n ]\n ),\n extensions=self.cfg[\"DATA\"][\"Extension\"],\n )\n\n self.test = InferenceDataset(\n main_dir=self.data_dir.joinpath(\"test\"),\n loader=img_loader,\n transform=transforms.Compose(\n [transforms.ToTensor(), transforms.CenterCrop(120)]\n ),\n )\n\n\n def setup(self, stage=None):\n # called on every GPU\n self.train, self.val = random_split(\n self.dataset, [10000, 3404], generator=torch.Generator().manual_seed(666)\n )\n\n def train_dataloader(self):\n return DataLoader(self.train, self.batch_size, shuffle=True, num_workers=4)\n\n def val_dataloader(self):\n return DataLoader(self.val, 1, num_workers=0, shuffle=False)\n \n def test_dataloader(self):\n return DataLoader(\n self.test,\n self.batch_size,\n num_workers=0,\n shuffle=False\n )\n\n"
] |
[
[
"torch.Generator",
"torch.utils.data.DataLoader",
"torch.tensor"
]
] |
br5555/reinforcment_learning
|
[
"38c089d24ad0e904d25e9c6a111255f1fc51f265"
] |
[
"Part_1_Deep_Q-Learning/Module_1_Self Driving_Car/map.py"
] |
[
"# Self Driving Car\n\n# Importing the libraries\nimport numpy as np\nfrom random import random, randint\nimport matplotlib.pyplot as plt\nimport time\n\n# Importing the Kivy packages\nfrom kivy.app import App\nfrom kivy.uix.widget import Widget\nfrom kivy.uix.button import Button\nfrom kivy.graphics import Color, Ellipse, Line\nfrom kivy.config import Config\nfrom kivy.properties import NumericProperty, ReferenceListProperty, ObjectProperty\nfrom kivy.vector import Vector\nfrom kivy.clock import Clock\n\n# Importing the Dqn object from our AI in ai.py\nfrom ai import Dqn\n\n# Adding this line if we don't want the right click to put a red point\nConfig.set('input', 'mouse', 'mouse,multitouch_on_demand')\n\n# Introducing last_x and last_y, used to keep the last point in memory when we draw the sand on the map\nlast_x = 0\nlast_y = 0\nn_points = 0\nlength = 0\n\n# Getting our AI, which we call \"brain\", and that contains our neural network that represents our Q-function\nbrain = Dqn(5, 3, 0.9)\naction2rotation = [0, 20, -20]\nlast_reward = 0\nscores = []\n\n# Initializing the map\nfirst_update = True\n\n\ndef init():\n global sand\n global goal_x\n global goal_y\n global first_update\n sand = np.zeros((longueur, largeur))\n goal_x = 20\n goal_y = largeur - 20\n first_update = False\n\n\n# Initializing the last distance\nlast_distance = 0\n\n\n# Creating the car class\n\nclass Car(Widget):\n angle = NumericProperty(0)\n rotation = NumericProperty(0)\n velocity_x = NumericProperty(0)\n velocity_y = NumericProperty(0)\n velocity = ReferenceListProperty(velocity_x, velocity_y)\n sensor1_x = NumericProperty(0)\n sensor1_y = NumericProperty(0)\n sensor1 = ReferenceListProperty(sensor1_x, sensor1_y)\n sensor2_x = NumericProperty(0)\n sensor2_y = NumericProperty(0)\n sensor2 = ReferenceListProperty(sensor2_x, sensor2_y)\n sensor3_x = NumericProperty(0)\n sensor3_y = NumericProperty(0)\n sensor3 = ReferenceListProperty(sensor3_x, sensor3_y)\n signal1 = NumericProperty(0)\n signal2 = NumericProperty(0)\n signal3 = NumericProperty(0)\n\n def move(self, rotation):\n self.pos = Vector(*self.velocity) + self.pos\n self.rotation = rotation\n self.angle = self.angle + self.rotation\n # (the sensors are the 3 colored squares in front of the car)\n self.sensor1 = Vector(30, 0).rotate(self.angle) + self.pos\n self.sensor2 = Vector(30, 0).rotate((self.angle + 30) % 360) + self.pos\n self.sensor3 = Vector(30, 0).rotate((self.angle - 30) % 360) + self.pos\n self.signal1 = int(np.sum(sand[int(self.sensor1_x) - 10:int(self.sensor1_x) + 10,\n int(self.sensor1_y) - 10:int(self.sensor1_y) + 10])) / 400.\n self.signal2 = int(np.sum(sand[int(self.sensor2_x) - 10:int(self.sensor2_x) + 10,\n int(self.sensor2_y) - 10:int(self.sensor2_y) + 10])) / 400.\n self.signal3 = int(np.sum(sand[int(self.sensor3_x) - 10:int(self.sensor3_x) + 10,\n int(self.sensor3_y) - 10:int(self.sensor3_y) + 10])) / 400.\n if self.sensor1_x > longueur - 10 or self.sensor1_x < 10 or self.sensor1_y > largeur - 10 or self.sensor1_y < 10:\n self.signal1 = 1.\n if self.sensor2_x > longueur - 10 or self.sensor2_x < 10 or self.sensor2_y > largeur - 10 or self.sensor2_y < 10:\n self.signal2 = 1.\n if self.sensor3_x > longueur - 10 or self.sensor3_x < 10 or self.sensor3_y > largeur - 10 or self.sensor3_y < 10:\n self.signal3 = 1.\n\n\nclass Ball1(Widget):\n pass\n\n\nclass Ball2(Widget):\n pass\n\n\nclass Ball3(Widget):\n pass\n\n\n# Creating the game class\n\nclass Game(Widget):\n car = ObjectProperty(None)\n ball1 = ObjectProperty(None)\n ball2 = 
ObjectProperty(None)\n ball3 = ObjectProperty(None)\n\n def serve_car(self):\n self.car.center = self.center\n self.car.velocity = Vector(6, 0)\n\n def update(self, dt):\n\n global brain\n global last_reward\n global scores\n global last_distance\n global goal_x\n global goal_y\n global longueur\n global largeur\n\n longueur = self.width\n largeur = self.height\n if first_update:\n init()\n\n xx = goal_x - self.car.x\n yy = goal_y - self.car.y\n orientation = Vector(*self.car.velocity).angle((xx, yy)) / 180.\n last_signal = [self.car.signal1, self.car.signal2, self.car.signal3, orientation, -orientation]\n action = brain.update(last_reward, last_signal)\n scores.append(brain.score())\n rotation = action2rotation[action]\n self.car.move(rotation)\n distance = np.sqrt((self.car.x - goal_x) ** 2 + (self.car.y - goal_y) ** 2)\n self.ball1.pos = self.car.sensor1\n self.ball2.pos = self.car.sensor2\n self.ball3.pos = self.car.sensor3\n\n if sand[int(self.car.x), int(self.car.y)] > 0:\n self.car.velocity = Vector(1, 0).rotate(self.car.angle)\n last_reward = -1\n else: # otherwise\n self.car.velocity = Vector(6, 0).rotate(self.car.angle)\n last_reward = -0.2\n if distance < last_distance:\n last_reward = 0.1\n\n if self.car.x < 10:\n self.car.x = 10\n last_reward = -1\n if self.car.x > self.width - 10:\n self.car.x = self.width - 10\n last_reward = -1\n if self.car.y < 10:\n self.car.y = 10\n last_reward = -1\n if self.car.y > self.height - 10:\n self.car.y = self.height - 10\n last_reward = -1\n\n # Updates the goal when the goal is reached (upper left corner to bottom right, from airport to downtown)\n if distance < 100:\n goal_x = self.width - goal_x\n goal_y = self.height - goal_y\n last_distance = distance\n\n\n# Adding the painting tools\n\nclass MyPaintWidget(Widget):\n def on_touch_down(self, touch):\n global length, n_points, last_x, last_y\n with self.canvas:\n Color(0.8, 0.7, 0)\n d = 10.\n touch.ud['line'] = Line(points=(touch.x, touch.y), width=10)\n last_x = int(touch.x)\n last_y = int(touch.y)\n n_points = 0\n length = 0\n sand[int(touch.x), int(touch.y)] = 1\n\n def on_touch_move(self, touch):\n global length, n_points, last_x, last_y\n if touch.button == 'left':\n touch.ud['line'].points += [touch.x, touch.y]\n x = int(touch.x)\n y = int(touch.y)\n length += np.sqrt(max((x - last_x) ** 2 + (y - last_y) ** 2, 2))\n n_points += 1.\n density = n_points / (length)\n touch.ud['line'].width = int(20 * density + 1)\n sand[int(touch.x) - 10: int(touch.x) + 10, int(touch.y) - 10: int(touch.y) + 10] = 1\n last_x = x\n last_y = y\n\n\n# Adding the API Buttons (clear, save and load)\n\nclass CarApp(App):\n def build(self):\n parent = Game()\n parent.serve_car()\n Clock.schedule_interval(parent.update, 1.0 / 60.0)\n self.painter = MyPaintWidget()\n clearbtn = Button(text='clear')\n savebtn = Button(text='save', pos=(parent.width, 0))\n loadbtn = Button(text='load', pos=(2 * parent.width, 0))\n clearbtn.bind(on_release=self.clear_canvas)\n savebtn.bind(on_release=self.save)\n loadbtn.bind(on_release=self.load)\n parent.add_widget(self.painter)\n parent.add_widget(clearbtn)\n parent.add_widget(savebtn)\n parent.add_widget(loadbtn)\n return parent\n\n def clear_canvas(self, obj):\n global sand\n self.painter.canvas.clear()\n sand = np.zeros((longueur, largeur))\n\n def save(self, obj):\n print(\"saving brain...\")\n brain.save()\n plt.plot(scores)\n plt.show()\n\n def load(self, obj):\n print(\"loading last saved brain...\")\n brain.load()\n\n\n# Running the whole thing\nif __name__ == 
'__main__':\n CarApp().run()\n"
] |
[
[
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show",
"numpy.zeros",
"numpy.sqrt"
]
] |
Benjscho/mdptetris-experiments
|
[
"743113bfdcb309c7b9904d6bc5cc5cc65dc4d2e4"
] |
[
"mdptetris_experiments/experiments/analysis/create_graphs.py"
] |
[
"import ast\nimport csv\nimport sys\nfrom argparse import ArgumentParser, Namespace\nfrom typing import List\n\nimport matplotlib\nimport numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\n\nfrom run_arg_parser import get_parser\n\nmatplotlib.rcParams['mathtext.fontset'] = 'cm'\nmatplotlib.rcParams['font.family'] = 'STIXGeneral'\nmatplotlib.pyplot.title(r'ABC123 vs $\\mathrm{ABC123}^{123}$')\n\n\nSAVE_DIR = \"/Users/crow/Desktop/bath-msc-main/cm50170-Diss/Dissertation/resources/\"\nROOT_RUN_DIR = \"/Users/crow/Desktop/bath-msc-main/cm50170-Diss/experiment-results/main-runs/run-info-\"\nMBDQN_RUNS = [ROOT_RUN_DIR +\n i for i in [\"20210809T161605Z\", \"20210811T071836Z\"]]\nMBDQN_1PIECE_RUNS = [ROOT_RUN_DIR +\n i for i in [\"20210713T101844Z\", \"20210803T070646Z\"]]\nPPO_RUNS = [ROOT_RUN_DIR + i for i in [\"20210730T093417Z\", \"20210802T205340Z\"]]\nMBDQN_RUNS_SUPPLEMENTARY = [ROOT_RUN_DIR +\n i for i in [\"20210809T092959Z\", \"20210809T095013Z\"]]\nMBDQN_RUNS_SUPPLEMENTARY_2 = [ROOT_RUN_DIR +\n i for i in [\"20210809T112302Z\"]]\nPPO_CLIP_RUNS = [ROOT_RUN_DIR +\n i for i in [\"20210828T071816Z\", \"20210826T082657Z\", \"20210827T100614Z\"]]\n \n\n\ndef get_run_args(run_dirs: List[str]) -> dict:\n \"\"\"\n Method to parse the run arguments saved in MBDQN run directories. \n\n :param run_dirs: List of directories to parse run args for\n\n :return: Returns a dictionary of run arguments \n \"\"\"\n parser = get_parser()\n run_args = {}\n for dir in run_dirs:\n with open(dir + \"/args.txt\") as f:\n ns = f.read()\n args, unknown = parser.parse_known_args(namespace=eval(ns))\n run_args[dir] = args\n return run_args\n\n\ndef read_result_csv(file_path: str) -> List:\n \"\"\"\n Method to read in the first line of a csv to an array. \n \n :param file_path: Path to csv file. \n\n :return: Returns array containing the first line of the csv\n \"\"\"\n with open(file_path, mode='r') as file:\n result = csv.reader(file)\n t = [l for l in result]\n return t[0]\n\n\ndef analyse_MBDQN(run_dirs: List[str], title: str, save_file: str, alpha: float=0.8, grouping: int=20):\n \"\"\"\n Method to take in a list of MBDQN run directories, iterate through them\n to collect run information, and plot the resulting data. \n\n :param run_dirs: list of run result directories\n :param title: Title for the resulting graph\n :param save_file: Name for the save file\n \"\"\"\n run_args = get_run_args(run_dirs)\n run_timesteps = {}\n run_epochs = {}\n\n for dir in run_dirs:\n run_timesteps[dir] = read_result_csv(dir + \"/timesteps.csv\")\n run_epochs[dir] = read_result_csv(dir + \"/epochs.csv\")\n\n for dir in run_dirs:\n print()\n print(f\"{save_file} Hyperparameters:\")\n for key in run_args[dir].__dict__:\n if run_args[dir].__dict__[key]:\n print(f\"{key} & {run_args[dir].__dict__[key]} \\\\\\\\\".capitalize().replace(\n \"_\", \" \"))\n\n plt.figure(save_file)\n plt.title(title)\n for dir in run_dirs:\n df = pd.DataFrame(run_epochs[dir])\n df = df.astype(float)\n df = smooth_data(df, alpha, grouping)\n plt.plot([i*grouping for i in range(len(df))], df,\n label=run_args[dir].state_rep.capitalize())\n plt.legend(title=\"State representation\")\n plt.xlabel(\"Training epoch\")\n plt.ylabel(f\"Average reward per episode, grouped over {grouping} epochs\")\n plt.savefig(SAVE_DIR + save_file + \".png\")\n\n\ndef smooth_data(df: pd.DataFrame, alpha: float = 0.9, grouping: int = 100):\n \"\"\"\n Method to group results of a dataframe and smooth with an alpha factor. 
\n Attribution: https://stackoverflow.com/a/36810658/14354978\n \"\"\"\n tdf = df.groupby(np.arange(len(df))//grouping).mean()\n return tdf.ewm(alpha=(1 - alpha)).mean()\n\n\ndef analyse_PPO(run_dirs: List[str], title: str, save_file: str):\n \"\"\"\n Method to take in a list of PPO run directories, iterate through them\n to collect run information, and plot the resulting data. \n\n :param run_dirs: list of run result directories\n :param title: Title for the resulting graph\n :param save_file: Name for the save file\n \"\"\"\n run_args = {}\n for dir in run_dirs:\n with open(dir + \"/args.txt\") as f:\n data = f.read()\n run_args[dir] = ast.literal_eval(data)\n\n avg_rewards = {}\n timesteps = {}\n\n for dir in run_dirs:\n timesteps[dir] = pd.read_csv(dir + \"/rewards.csv\")\n\n # Print hyperparams\n for dir in run_dirs:\n print()\n print(f\"{save_file} Hyperparameters:\")\n for key in run_args[dir]:\n if run_args[dir][key]:\n print(\n f\"{key} & {run_args[dir][key]} \\\\\\\\\".capitalize().replace(\"_\", \" \"))\n\n plt.figure(save_file)\n plt.title(title)\n grouping = 1\n for dir in run_dirs:\n df = timesteps[dir]\n df = smooth_data(df, 0.8, grouping)\n plt.plot(df['Step'], df['Value'], label=run_args[dir]['board_height'])\n plt.legend(title=\"Board height\")\n plt.xlabel(\"Time steps\")\n plt.ylabel(f\"Average reward per time step\")\n plt.savefig(SAVE_DIR + save_file + \".png\")\n\n\ndef analyse_PPO_clip(run_dirs: List[str], title: str, save_file: str, limit: int = 50_000_000):\n \"\"\"\n Method to take in a list of PPO run directories, iterate through them\n to collect run information, and plot the resulting data. \n\n :param run_dirs: list of run result directories\n :param title: Title for the resulting graph\n :param save_file: Name for the save file\n \"\"\"\n run_args = {}\n for dir in run_dirs:\n with open(dir + \"/args.txt\") as f:\n data = f.read()\n run_args[dir] = ast.literal_eval(data)\n\n avg_rewards = {}\n timesteps = {}\n\n for dir in run_dirs:\n timesteps[dir] = pd.read_csv(dir + \"/rewards.csv\")\n\n # Print hyperparams\n for dir in run_dirs:\n print()\n print(f\"{save_file} Hyperparameters:\")\n for key in run_args[dir]:\n if run_args[dir][key]:\n print(\n f\"{key} & {run_args[dir][key]} \\\\\\\\\".capitalize().replace(\"_\", \" \"))\n\n plt.figure(save_file)\n plt.title(title)\n grouping = 1\n for dir in run_dirs:\n df = timesteps[dir]\n df = smooth_data(df, 0.8, grouping)\n plt.plot(df['Step'], df['Value'], label=run_args[dir]['clip'])\n plt.legend(title=\"Clip value\")\n plt.xlabel(\"Time steps\")\n plt.xlim([0,limit])\n plt.ylabel(f\"Average reward per time step\")\n plt.savefig(SAVE_DIR + save_file + \".png\")\n\n\ndef main():\n \"\"\"\n Define main method to analyse PPO, and MBDQN runs specified in the global\n vars above. \n \"\"\"\n analyse_PPO(PPO_RUNS, \"PPO Learning rate\", \"ppo\")\n analyse_MBDQN(MBDQN_1PIECE_RUNS,\n \"(a) MBDQN learning rate for single piece episodes\", \"mbdqn-1piece\")\n analyse_MBDQN(\n MBDQN_RUNS, \"(b) MBDQN learning rate on standard Tetris\", \"mbdqn\")\n analyse_MBDQN([MBDQN_RUNS[0]], \"MBDQN learning rate 1D only\", \"mbdqn-1d\", 0.8, 20)\n analyse_MBDQN(MBDQN_RUNS_SUPPLEMENTARY, \"MBDQN learning rate, longer duration (a)\", \"mbdqn-10_000\", 0.8, 20)\n analyse_MBDQN(MBDQN_RUNS_SUPPLEMENTARY_2, \"MBDQN learning rate, longer duration (b)\", \"mbdqn-100_000\", 0.8, 20)\n analyse_PPO_clip(PPO_CLIP_RUNS, \"PPO learning rate as clip value varies\", \"ppo-clip\")\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"matplotlib.pyplot.legend",
"pandas.read_csv",
"matplotlib.pyplot.title",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"pandas.DataFrame",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylabel"
]
] |
shayenne/VoiceDetection
|
[
"5b9ce0950da245fa9488301e3a024b06f363f4db"
] |
[
"metrics.py"
] |
[
"import matplotlib.pyplot as plt#\nimport numpy as np#\nimport sklearn#\nimport os\nimport glob\nimport librosa\nimport pandas as pd\nfrom IPython.display import Audio\nfrom sklearn.externals import joblib#\n\n\ndef zero_rule(vec_labels):\n # Now lets predict the labels of the test data!\n ones = np.ones(len(vec_labels))\n # We can use sklearn to compute the accuracy score\n accuracy = sklearn.metrics.accuracy_score(vec_labels, ones)\n return accuracy\n\n\ndef load_cls_predict(filename, vec_features):\n # Load trained model (RF)\n filename = 'finalized_model_RF_1000_VGGish.sav' \n # load the model from disk\n clf = joblib.load(filename)\n\n # Now lets predict the labels of the test data!\n predictions = clf.predict(test_features)\n # We can use sklearn to compute the accuracy score\n accuracy = sklearn.metrics.accuracy_score(test_labels, predictions)\n return (predictions, accuracy)\n\n\ndef load_scaler_transform(filename, vec_features):\n # Load scaler (SVM)\n filename = '../scaler_VGGish.sav' \n # load the model from disk\n scaler = joblib.load(filename)\n # Transform data\n return scaler.transform(test_features)\n\n\ndef confusion_matrix(vec_labels, predictions):\n # lets compute the show the confusion matrix:\n cm = sklearn.metrics.confusion_matrix(vec_labels, predictions)\n \n return cm\n\n\ndef plot_confusion_matrix(cm, labels): # labels é [absent, present] \n fig, ax = plt.subplots()\n ax.imshow(cm, interpolation='nearest', cmap='gray')\n for i, line in enumerate(cm):\n for j, l in enumerate(line):\n ax.text(j, i, l, size=20, color='green')\n ax.set_xticks(range(len(cm)))\n ax.set_xticklabels(labels)\n ax.set_yticks(range(len(cm)))\n ax.set_yticklabels(labels)\n ax.set_ylabel('True label')\n ax.set_xlabel('Predicted label')\n plt.show()\n \n \n\nif __name__ == \"__main__\":\n \n print (\"I am a function import file =D\")\n"
] |
[
[
"matplotlib.pyplot.subplots",
"sklearn.metrics.confusion_matrix",
"sklearn.externals.joblib.load",
"matplotlib.pyplot.show",
"sklearn.metrics.accuracy_score"
]
] |